 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2015, Joyent, Inc.  All rights reserved.
 */

/* Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved. */
#include "thr_uberdata.h"
#include <sys/socket.h>

#pragma weak lockf64 = lockf
#pragma weak openat64 = openat
#pragma weak open64 = open
#pragma weak creat64 = creat
#pragma weak pread64 = pread
#pragma weak preadv64 = preadv
#pragma weak pwrite64 = pwrite
#pragma weak pwritev64 = pwritev
 * atfork_lock protects the pthread_atfork() data structures.
 *
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * Functions called in dlopen()ed L10N objects can do anything, including
 * call malloc() and free().  Such calls are not fork-safe when protected
 * by an ordinary mutex that is acquired in libc's prefork processing
 * because, with an interposed malloc library present, there would be a
 * lock ordering violation due to the pthread_atfork() prefork function
 * in the interposition library acquiring its malloc lock(s) before the
 * ordinary mutex in libc being acquired by libc's prefork functions.
 *
 * Within libc, calls to malloc() and free() are fork-safe if the calls
 * are made while holding no other libc locks.  This covers almost all
 * of libc's malloc() and free() calls.  For those libc code paths, such
 * as the above-mentioned L10N calls, that require serialization and that
 * may call malloc() or free(), libc uses callout_lock_enter() to perform
 * the serialization.  This works because callout_lock is not acquired as
 * part of running the pthread_atfork() prefork handlers (to avoid the
 * lock ordering violation described above).  Rather, it is simply
 * reinitialized in postfork1_child() to cover the case that some
 * now-defunct thread might have been suspended while holding it.
fork_lock_enter(void)
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);

fork_lock_exit(void)
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
 * Use cancel_safe_mutex_lock() to protect against being cancelled while
 * holding callout_lock and calling outside of libc (via L10N plugins).
 * We will honor a pending cancellation request when callout_lock_exit()
 * is called, by calling cancel_safe_mutex_unlock().
callout_lock_enter(void)
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
callout_lock_exit(void)
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
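
/*
 * Illustrative sketch (not taken from this file): a libc code path that
 * must serialize an L10N callout which may call malloc() or free() would
 * bracket the callout with the functions above rather than with
 * lmutex_lock(), for example:
 *
 *	callout_lock_enter();
 *	(*l10n_callout)(arg);		(may call malloc() and free())
 *	callout_lock_exit();
 *
 * Because callout_lock is not acquired by the prefork handlers and is
 * reinitialized in postfork1_child(), this pattern remains fork-safe.
 * The names l10n_callout and arg are placeholders, not real symbols.
 */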
forkx(int flags)
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {

		pid = __forkx(flags);
		if (pid == 0) {		/* child */

		/*
		 * Cannot call fork() from a fork handler.
		 */
	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);
	/*
	 * Posix (SUSv3) requires fork() to be async-signal-safe.
	 * This cannot be made to happen with fork handlers in place
	 * (they grab locks).  To be in nominal compliance, don't run
	 * any fork handlers if we are called within a signal context.
	 * This leaves the child process in a questionable state with
	 * respect to its locks, but at least the parent process does
	 * not become deadlocked due to the calling thread attempting
	 * to acquire a lock that it already owns.
	 */
	if (self->ul_siglink == NULL)
		_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);
	/*
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */

	pid = __forkx(flags);
	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_siginfo.si_signo = 0;

		/* reset the library's data structures to reflect one thread */

		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_child_handler();
		/* restart all threads that were suspended for fork() */

		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_parent_handler();

	(void) mutex_unlock(&udp->atfork_lock);
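
/*
 * Illustrative sketch (application-level, not part of libc): a library
 * that must keep its own mutex consistent across fork() registers
 * handlers once, for example:
 *
 *	static pthread_mutex_t lib_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void prepare(void) { (void) pthread_mutex_lock(&lib_lock); }
 *	static void parent(void) { (void) pthread_mutex_unlock(&lib_lock); }
 *	static void child(void) { (void) pthread_mutex_unlock(&lib_lock); }
 *
 *	(void) pthread_atfork(prepare, parent, child);
 *
 * forkx() runs the prepare handlers before __forkx() and the parent or
 * child handlers after it, as shown above; per the comment above, a
 * handler that itself calls fork() or pthread_atfork() fails with EDEADLK.
 */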
/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = fork
/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
forkallx(int flags)
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {

		pid = __forkallx(flags);
		if (pid == 0) {		/* child */

	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);

	pid = __forkallx(flags);

		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_siginfo.si_signo = 0;

	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);

forkall(void)
	return (forkallx(0));
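
/*
 * Illustrative sketch (application-level, not part of libc): the only
 * difference between the two interfaces is which threads exist in the
 * child, for example:
 *
 *	pid_t a = fork();	(same as fork1(): only the caller is replicated)
 *	pid_t b = forkall();	(every thread in the parent is replicated)
 */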
/*
 * For the implementation of cancellation at cancellation points.
 */
#define	PROLOGUE \
	ulwp_t *self = curthread; \
	int nocancel = \
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks | \
	    self->ul_critical | self->ul_sigdefer); \
	if (nocancel == 0) { \
		self->ul_save_async = self->ul_cancel_async; \
		if (!self->ul_cancel_disabled) { \
			self->ul_cancel_async = 1; \
			if (self->ul_cancel_pending) \
				pthread_exit(PTHREAD_CANCELED); \
		self->ul_sp = stkptr(); \
	} else if (self->ul_cancel_pending && \
	    !self->ul_cancel_disabled) { \
		set_cancel_eintr_flag(self); \

#define	EPILOGUE \
	if (nocancel == 0) { \
		self->ul_cancel_async = self->ul_save_async; \
/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return(function_call) part is to allow the
 * compiler to make the call be executed with tail recursion, which
 * saves a register window on sparc and slightly (not much) improves
 * the code for x86/x64 compilations.
 */
#define	PERFORM(function_call) \
			*self->ul_errnop = EINTR; \
			return (function_call); \
	rv = function_call; \
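
/*
 * Illustrative sketch (simplified): a typical cancellation-point wrapper
 * later in this file is written with PERFORM, for example:
 *
 *	ssize_t
 *	read(int fd, void *buf, size_t size)
 *	{
 *		extern ssize_t __read(int, void *, size_t);
 *		ssize_t rv;
 *
 *		PERFORM(__read(fd, buf, size))
 *	}
 *
 * PERFORM is built from the PROLOGUE and EPILOGUE fragments above,
 * enabling asynchronous cancellation for the duration of the underlying
 * system call and restoring the previous cancellation state afterwards.
 */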
/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */
#define	PROLOGUE_MASK(sigmask) \
	ulwp_t *self = curthread; \
	int nocancel = \
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks | \
	    self->ul_critical | self->ul_sigdefer); \
	if (!self->ul_vfork) { \
			block_all_signals(self); \
			self->ul_tmpmask = *sigmask; \
			delete_reserved_signals(&self->ul_tmpmask); \
			self->ul_sigsuspend = 1; \
		if (nocancel == 0) { \
			self->ul_save_async = self->ul_cancel_async; \
			if (!self->ul_cancel_disabled) { \
				self->ul_cancel_async = 1; \
				if (self->ul_cancel_pending) { \
					if (self->ul_sigsuspend) { \
						self->ul_sigsuspend = 0;\
						restore_signals(self); \
					pthread_exit(PTHREAD_CANCELED); \
			self->ul_sp = stkptr(); \
/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */
#define	EPILOGUE_MASK \
	if (nocancel == 0) { \
		self->ul_cancel_async = self->ul_save_async; \
	if (self->ul_sigsuspend) { \
		self->ul_sigsuspend = 0; \
		restore_signals(self); \
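
/*
 * Illustrative sketch (application-level, not part of libc): the
 * temporary-mask semantics described above are what make the classic
 * sigsuspend() pattern race-free, for example:
 *
 *	sigset_t block, waitmask;
 *	(void) sigemptyset(&block);
 *	(void) sigaddset(&block, SIGUSR1);
 *	(void) sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	(void) sigdelset(&waitmask, SIGUSR1);
 *	while (!got_sigusr1)
 *		(void) sigsuspend(&waitmask);
 *
 * PROLOGUE_MASK keeps all signals blocked until the kernel installs the
 * temporary mask, so a signal posted in that window cannot be lost;
 * got_sigusr1 is a hypothetical volatile sig_atomic_t flag set by a
 * SIGUSR1 handler.
 */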
/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
_cancel_prologue(void)
	ulwp_t *self = curthread;

	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				pthread_exit(PTHREAD_CANCELED);
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
_cancel_epilogue(void)
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_cancel_async = self->ul_save_async;
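
/*
 * Illustrative sketch (simplified, hypothetical caller): cancellation
 * points too complex for PROLOGUE/EPILOGUE bracket their blocking region
 * by hand, for example:
 *
 *	_cancel_prologue();
 *	rv = __some_blocking_call(args);
 *	_cancel_epilogue();
 *
 * The prologue records in self->ul_cancel_prologue whether it enabled
 * asynchronous cancellation, so the epilogue restores only what the
 * prologue changed; __some_blocking_call and args are placeholders.
 */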
/*
 * Called from _thrp_join() (thr_join() is a cancellation point)
 */
lwp_wait(thread_t tid, thread_t *found)
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
read(int fd, void *buf, size_t size)
	extern ssize_t __read(int, void *, size_t);

	PERFORM(__read(fd, buf, size))
write(int fd, const void *buf, size_t size)
	extern ssize_t __write(int, const void *, size_t);

	PERFORM(__write(fd, buf, size))
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *flagsp)
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
	int *bandp, int *flagsp)
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
#pragma weak __xpg4_putmsg = putmsg
#pragma weak __xpg4_putpmsg = putpmsg

putmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int flags)
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
putpmsg(int fd, const struct strbuf *ctlptr,
	const struct strbuf *dataptr, int band, int flags)
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
	error = abort ? EINTR : __nanosleep(rqtp, rmtp);
clock_nanosleep(clockid_t clock_id, int flags,
	const timespec_t *rqtp, timespec_t *rmtp)
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:

	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
	if (clock_id == CLOCK_HIGHRES)

	error = abort ? EINTR : __nanosleep(&reltime, rmtp);

	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);

	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
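
/*
 * Illustrative sketch (application-level, not part of libc): the
 * absolute-time recomputation above is what lets a periodic loop keep
 * honoring its deadline even if the system clock is reset, for example:
 *
 *	struct timespec next;
 *	(void) clock_gettime(CLOCK_REALTIME, &next);
 *	for (;;) {
 *		next.tv_sec += 1;
 *		while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
 *		    &next, NULL) == EINTR)
 *			continue;
 *		do_periodic_work();	(hypothetical callback)
 *	}
 */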
sleep(unsigned int sec)
	unsigned int rem = 0;

	ts.tv_sec = (time_t)sec;
	if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
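
/*
 * Worked example (illustrative): if a 10-second sleep() is interrupted
 * with 2.6 seconds remaining, nanosleep() reports tsr = {2, 600000000};
 * since tsr.tv_nsec >= NANOSEC / 2, the remainder is rounded up and
 * sleep() returns 3.
 */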
usleep(useconds_t usec)
	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	(void) nanosleep(&ts, NULL);
close(int fildes)
	extern void _aio_close(int);
	extern int __close(int);

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);

	PERFORM(__close(fildes))
door_call(int d, door_arg_t *params)
	extern int __door_call(int, door_arg_t *);

	PERFORM(__door_call(d, params))
fcntl(int fildes, int cmd, ...)
	extern int __fcntl(int, int, ...);

	arg = va_arg(ap, intptr_t);

		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
fdatasync(int fildes)
	extern int __fdsync(int, int);

	PERFORM(__fdsync(fildes, O_DSYNC))

fsync(int fildes)
	extern int __fdsync(int, int);

	PERFORM(__fdsync(fildes, O_SYNC))
lockf(int fildes, int function, off_t size)
	extern int __lockf(int, int, off_t);

	PERFORM(__lockf(fildes, function, size))
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
	extern ssize_t __msgrcv(int, void *, size_t, long, int);

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))

msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
	extern int __msgsnd(int, const void *, size_t, int);

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
msync(void *addr, size_t len, int flags)
	extern int __msync(void *, size_t, int);

	PERFORM(__msync(addr, len, flags))
openat(int fd, const char *path, int oflag, ...)
		mode = va_arg(ap, mode_t);

	PERFORM(__openat(fd, path, oflag, mode))

open(const char *path, int oflag, ...)
		mode = va_arg(ap, mode_t);

	PERFORM(__open(path, oflag, mode))
creat(const char *path, mode_t mode)
	return (open(path, O_WRONLY | O_CREAT | O_TRUNC, mode));

pause(void)
	extern int __pause(void);
pread(int fildes, void *buf, size_t nbyte, off_t offset)
	extern ssize_t __pread(int, void *, size_t, off_t);

	PERFORM(__pread(fildes, buf, nbyte, offset))

preadv(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
	extern ssize_t __preadv(int, const struct iovec *, int, off_t, off_t);

	PERFORM(__preadv(fildes, iov, iovcnt, offset, 0))
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
	extern ssize_t __pwrite(int, const void *, size_t, off_t);

	PERFORM(__pwrite(fildes, buf, nbyte, offset))

pwritev(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
	extern ssize_t __pwritev(int, const struct iovec *, int, off_t, off_t);

	PERFORM(__pwritev(fildes, iov, iovcnt, offset, 0))
readv(int fildes, const struct iovec *iov, int iovcnt)
	extern ssize_t __readv(int, const struct iovec *, int);

	PERFORM(__readv(fildes, iov, iovcnt))

sigpause(int sig)
	extern int __sigpause(int);

	PERFORM(__sigpause(sig))
sigsuspend(const sigset_t *set)
	extern int __sigsuspend(const sigset_t *);

	rv = __sigsuspend(set);
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
	const sigset_t *sigmask)
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);

		*self->ul_errnop = EINTR;
		sig = __sigtimedwait(set, &info, timeout);
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			*self->ul_errnop = EINTR;
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
#pragma weak __posix_sigwait = sigwait

sigwait(const sigset_t *set, int *sig)
	if ((*sig = sigtimedwait(set, NULL, NULL)) != -1)

sigwaitinfo(const sigset_t *set, siginfo_t *info)
	return (sigtimedwait(set, info, NULL));
sigqueue(pid_t pid, int signo, const union sigval value)
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);

	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version,
	int flags)
	extern int __so_accept(int, struct sockaddr *, uint_t *, int, int);

	PERFORM(__so_accept(sock, addr, addrlen, version, flags))
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
	extern int __so_connect(int, struct sockaddr *, uint_t, int);

	PERFORM(__so_connect(sock, addr, addrlen, version))

_so_recv(int sock, void *buf, size_t len, int flags)
	extern int __so_recv(int, void *, size_t, int);

	PERFORM(__so_recv(sock, buf, len, flags))
_so_recvfrom(int sock, void *buf, size_t len, int flags,
	struct sockaddr *addr, int *addrlen)
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))

_so_recvmsg(int sock, struct msghdr *msg, int flags)
	extern int __so_recvmsg(int, struct msghdr *, int);

	PERFORM(__so_recvmsg(sock, msg, flags))
_so_send(int sock, const void *buf, size_t len, int flags)
	extern int __so_send(int, const void *, size_t, int);

	PERFORM(__so_send(sock, buf, len, flags))

_so_sendmsg(int sock, const struct msghdr *msg, int flags)
	extern int __so_sendmsg(int, const struct msghdr *, int);

	PERFORM(__so_sendmsg(sock, msg, flags))
_so_sendto(int sock, const void *buf, size_t len, int flags,
	const struct sockaddr *addr, int *addrlen)
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
tcdrain(int fildes)
	extern int __tcdrain(int);

	PERFORM(__tcdrain(fildes))
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);

	if (options & WNOHANG)
		return (__waitid(idtype, id, infop, options));
	PERFORM(__waitid(idtype, id, infop, options))
writev(int fildes, const struct iovec *iov, int iovcnt)
	extern ssize_t __writev(int, const struct iovec *, int);

	PERFORM(__writev(fildes, iov, iovcnt))