/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2015, Joyent, Inc.  All rights reserved.
 */

/* Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved. */

#include "lint.h"
#include "thr_uberdata.h"
#include <stdarg.h>
#include <poll.h>
#include <stropts.h>
#include <dlfcn.h>
#include <wait.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/file.h>
#include <sys/door.h>

/*
 * atfork_lock protects the pthread_atfork() data structures.
 *
 * fork_lock does double-duty.  Not only does it (and atfork_lock)
 * serialize calls to fork() and forkall(), but it also serializes calls
 * to thr_suspend() and thr_continue() (because fork() and forkall() also
 * suspend and continue other threads and they want no competition).
 *
 * Functions called in dlopen()ed L10N objects can do anything, including
 * call malloc() and free().  Such calls are not fork-safe when protected
 * by an ordinary mutex that is acquired in libc's prefork processing
 * because, with an interposed malloc library present, there would be a
 * lock ordering violation due to the pthread_atfork() prefork function
 * in the interposition library acquiring its malloc lock(s) before the
 * ordinary mutex in libc being acquired by libc's prefork functions.
 *
 * Within libc, calls to malloc() and free() are fork-safe if the calls
 * are made while holding no other libc locks.  This covers almost all
 * of libc's malloc() and free() calls.  For those libc code paths, such
 * as the above-mentioned L10N calls, that require serialization and that
 * may call malloc() or free(), libc uses callout_lock_enter() to perform
 * the serialization.  This works because callout_lock is not acquired as
 * part of running the pthread_atfork() prefork handlers (to avoid the
 * lock ordering violation described above).  Rather, it is simply
 * reinitialized in postfork1_child() to cover the case that some
 * now-defunct thread might have been suspended while holding it.
 */
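
/*
 * For illustration, a sketch of the lock ordering hazard described
 * above (the lock names are hypothetical, not actual libc symbols):
 *
 *	prefork pass:				another (suspended) thread:
 *	  interposer handler takes malloc_lock	  holds ordinary_libc_lock
 *	  libc handler takes ordinary_libc_lock	  waits for malloc_lock
 *
 * The prefork pass acquires malloc_lock before ordinary_libc_lock while
 * the other thread holds ordinary_libc_lock and waits for malloc_lock:
 * a classic AB/BA deadlock.  callout_lock sidesteps this because it is
 * never acquired during prefork processing at all.
 */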

void
fork_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_lock(&curthread->ul_uberdata->fork_lock);
}

void
fork_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	(void) mutex_unlock(&curthread->ul_uberdata->fork_lock);
}

/*
 * Use cancel_safe_mutex_lock() to protect against being cancelled while
 * holding callout_lock and calling outside of libc (via L10N plugins).
 * We will honor a pending cancellation request when callout_lock_exit()
 * is called, by calling cancel_safe_mutex_unlock().
 */

void
callout_lock_enter(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_lock(&curthread->ul_uberdata->callout_lock);
}

void
callout_lock_exit(void)
{
	ASSERT(curthread->ul_critical == 0);
	cancel_safe_mutex_unlock(&curthread->ul_uberdata->callout_lock);
}
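
/*
 * Usage sketch (a hypothetical caller, for illustration only): a libc
 * path that must serialize around a callout that may call malloc():
 *
 *	callout_lock_enter();
 *	(*l10n_callout)(arg);		// may call malloc()/free()
 *	callout_lock_exit();		// honors any pending cancellation
 */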

pid_t
forkx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		/*
		 * We are a child of vfork(); omit all of the fork
		 * logic and go straight to the system call trap.
		 * A vfork() child of a multithreaded parent
		 * must never call fork().
		 */
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		/*
		 * Cannot call fork() from a fork handler.
		 */
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;

	/*
	 * The functions registered by pthread_atfork() are defined by
	 * the application and its libraries and we must not hold any
	 * internal lmutex_lock()-acquired locks while invoking them.
	 * We hold only udp->atfork_lock to protect the atfork linkages.
	 * If one of these pthread_atfork() functions attempts to fork
	 * or to call pthread_atfork(), libc will detect the error and
	 * fail the call with EDEADLK.  Otherwise, the pthread_atfork()
	 * functions are free to do anything they please (except they
	 * will not receive any signals).
	 */
	(void) mutex_lock(&udp->atfork_lock);

	/*
	 * Posix (SUSv3) requires fork() to be async-signal-safe.
	 * This cannot be made to happen with fork handlers in place
	 * (they grab locks).  To be in nominal compliance, don't run
	 * any fork handlers if we are called within a signal context.
	 * This leaves the child process in a questionable state with
	 * respect to its locks, but at least the parent process does
	 * not become deadlocked due to the calling thread attempting
	 * to acquire a lock that it already owns.
	 */
	if (self->ul_siglink == NULL)
		_prefork_handler();

	/*
	 * Block every other thread attempting thr_suspend() or thr_continue().
	 */
	(void) mutex_lock(&udp->fork_lock);

	/*
	 * Block all signals.
	 * Just deferring them via sigoff() is not enough.
	 * We have to avoid taking a deferred signal in the child
	 * that was actually sent to the parent before __forkx().
	 */
	block_all_signals(self);

	/*
	 * This suspends all threads but this one, leaving them
	 * suspended outside of any critical regions in the library.
	 * Thus, we are assured that no lmutex_lock()-acquired library
	 * locks are held while we invoke fork() from the current thread.
	 */
	suspend_fork();

	pid = __forkx(flags);

	if (pid == 0) {		/* child */
		/*
		 * Clear our schedctl pointer.
		 * Discard any deferred signal that was sent to the parent.
		 * Because we blocked all signals before __forkx(), a
		 * deferred signal cannot have been taken by the child.
		 */
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		/* reset the library's data structures to reflect one thread */
		unregister_locks();
		postfork1_child();
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_child_handler();
	} else {
		/* restart all threads that were suspended for fork() */
		continue_fork(0);
		restore_signals(self);
		(void) mutex_unlock(&udp->fork_lock);
		if (self->ul_siglink == NULL)
			_postfork_parent_handler();
	}
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

/*
 * fork() is fork1() for both Posix threads and Solaris threads.
 * The forkall() interface exists for applications that require
 * the semantics of replicating all threads.
 */
#pragma weak fork1 = fork
pid_t
fork(void)
{
	return (forkx(0));
}

/*
 * Much of the logic here is the same as in forkx().
 * See the comments in forkx(), above.
 */
pid_t
forkallx(int flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	pid_t pid;

	if (self->ul_vfork) {
		if (udp->uberflags.uf_mt) {
			errno = ENOTSUP;
			return (-1);
		}
		pid = __forkallx(flags);
		if (pid == 0) {		/* child */
			udp->pid = getpid();
			self->ul_vfork = 0;
		}
		return (pid);
	}

	sigoff(self);
	if (self->ul_fork) {
		sigon(self);
		errno = EDEADLK;
		return (-1);
	}
	self->ul_fork = 1;
	(void) mutex_lock(&udp->atfork_lock);
	(void) mutex_lock(&udp->fork_lock);
	block_all_signals(self);
	suspend_fork();

	pid = __forkallx(flags);

	if (pid == 0) {
		self->ul_schedctl_called = NULL;
		self->ul_schedctl = NULL;
		self->ul_cursig = 0;
		self->ul_siginfo.si_signo = 0;
		udp->pid = getpid();
		unregister_locks();
		continue_fork(1);
	} else {
		continue_fork(0);
	}
	restore_signals(self);
	(void) mutex_unlock(&udp->fork_lock);
	(void) mutex_unlock(&udp->atfork_lock);
	self->ul_fork = 0;
	sigon(self);

	return (pid);
}

pid_t
forkall(void)
{
	return (forkallx(0));
}

/*
 * For the implementation of cancellation at cancellation points.
 */
#define	PROLOGUE							\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	int abort = 0;							\
	if (nocancel == 0) {						\
		self->ul_save_async = self->ul_cancel_async;		\
		if (!self->ul_cancel_disabled) {			\
			self->ul_cancel_async = 1;			\
			if (self->ul_cancel_pending)			\
				pthread_exit(PTHREAD_CANCELED);		\
		}							\
		self->ul_sp = stkptr();					\
	} else if (self->ul_cancel_pending &&				\
	    !self->ul_cancel_disabled) {				\
		set_cancel_eintr_flag(self);				\
		abort = 1;						\
	}

#define	EPILOGUE							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
}

/*
 * Perform the body of the action required by most of the cancelable
 * function calls.  The return (function_call) part is to allow the
 * compiler to execute the call as a tail call, which saves a register
 * window on sparc and slightly (not much) improves the code for
 * x86/x64 compilations.
 */
#define	PERFORM(function_call)						\
	PROLOGUE							\
	if (abort) {							\
		*self->ul_errnop = EINTR;				\
		return (-1);						\
	}								\
	if (nocancel)							\
		return (function_call);					\
	rv = function_call;						\
	EPILOGUE							\
	return (rv);
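
/*
 * For illustration (a sketch, not code that appears in this file):
 * given the macros above, a wrapper such as read() below effectively
 * expands to
 *
 *	ssize_t rv;
 *	PROLOGUE			(may pthread_exit() right here)
 *	if (abort) {
 *		*self->ul_errnop = EINTR;
 *		return (-1);
 *	}
 *	if (nocancel)
 *		return (__read(fd, buf, size));
 *	rv = __read(fd, buf, size);
 *	EPILOGUE			(restores async-cancel state)
 *	return (rv);
 */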

/*
 * Specialized prologue for sigsuspend() and pollsys().
 * These system calls pass a signal mask to the kernel.
 * The kernel replaces the thread's signal mask with the
 * temporary mask before the thread goes to sleep.  If
 * a signal is received, the signal handler will execute
 * with the temporary mask, as modified by the sigaction
 * for the particular signal.
 *
 * We block all signals until we reach the kernel with the
 * temporary mask.  This eliminates race conditions with
 * setting the signal mask while signals are being posted.
 */

#define	PROLOGUE_MASK(sigmask)						\
{									\
	ulwp_t *self = curthread;					\
	int nocancel =							\
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |	\
	    self->ul_critical | self->ul_sigdefer);			\
	if (!self->ul_vfork) {						\
		if (sigmask) {						\
			block_all_signals(self);			\
			self->ul_tmpmask = *sigmask;			\
			delete_reserved_signals(&self->ul_tmpmask);	\
			self->ul_sigsuspend = 1;			\
		}							\
		if (nocancel == 0) {					\
			self->ul_save_async = self->ul_cancel_async;	\
			if (!self->ul_cancel_disabled) {		\
				self->ul_cancel_async = 1;		\
				if (self->ul_cancel_pending) {		\
					if (self->ul_sigsuspend) {	\
						self->ul_sigsuspend = 0;\
						restore_signals(self);	\
					}				\
					pthread_exit(PTHREAD_CANCELED);	\
				}					\
			}						\
			self->ul_sp = stkptr();				\
		}							\
	}

/*
 * If a signal is taken, we return from the system call wrapper with
 * our original signal mask restored (see code in call_user_handler()).
 * If not (self->ul_sigsuspend is still non-zero), we must restore our
 * original signal mask ourself.
 */

#define	EPILOGUE_MASK							\
	if (nocancel == 0) {						\
		self->ul_sp = 0;					\
		self->ul_cancel_async = self->ul_save_async;		\
	}								\
	if (self->ul_sigsuspend) {					\
		self->ul_sigsuspend = 0;				\
		restore_signals(self);					\
	}								\
}

/*
 * Cancellation prologue and epilogue functions,
 * for cancellation points too complex to include here.
 */
void
_cancel_prologue(void)
{
	ulwp_t *self = curthread;

	self->ul_cancel_prologue =
	    (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks |
	    self->ul_critical | self->ul_sigdefer) != 0;
	if (self->ul_cancel_prologue == 0) {
		self->ul_save_async = self->ul_cancel_async;
		if (!self->ul_cancel_disabled) {
			self->ul_cancel_async = 1;
			if (self->ul_cancel_pending)
				pthread_exit(PTHREAD_CANCELED);
		}
		self->ul_sp = stkptr();
	} else if (self->ul_cancel_pending &&
	    !self->ul_cancel_disabled) {
		set_cancel_eintr_flag(self);
	}
}

void
_cancel_epilogue(void)
{
	ulwp_t *self = curthread;

	if (self->ul_cancel_prologue == 0) {
		self->ul_sp = 0;
		self->ul_cancel_async = self->ul_save_async;
	}
}

/*
 * Called from _thrp_join() (thr_join() is a cancellation point).
 */
int
lwp_wait(thread_t tid, thread_t *found)
{
	int error;

	PROLOGUE
	if (abort)
		return (EINTR);
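	/*
	 * Retry on EINTR unless the interruption is due to an active
	 * cancellation request, in which case return the EINTR.
	 */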
	while ((error = __lwp_wait(tid, found)) == EINTR && !cancel_active())
		continue;
	EPILOGUE
	return (error);
}

ssize_t
read(int fd, void *buf, size_t size)
{
	extern ssize_t __read(int, void *, size_t);
	ssize_t rv;

	PERFORM(__read(fd, buf, size))
}

ssize_t
write(int fd, const void *buf, size_t size)
{
	extern ssize_t __write(int, const void *, size_t);
	ssize_t rv;

	PERFORM(__write(fd, buf, size))
}

int
getmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *flagsp)
{
	extern int __getmsg(int, struct strbuf *, struct strbuf *, int *);
	int rv;

	PERFORM(__getmsg(fd, ctlptr, dataptr, flagsp))
}

int
getpmsg(int fd, struct strbuf *ctlptr, struct strbuf *dataptr,
    int *bandp, int *flagsp)
{
	extern int __getpmsg(int, struct strbuf *, struct strbuf *,
	    int *, int *);
	int rv;

	PERFORM(__getpmsg(fd, ctlptr, dataptr, bandp, flagsp))
}

int
putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags))
}
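
/*
 * The __xpg4_ variants below differ only in or-ing MSG_XPG4 into the
 * flags, a private flag that requests X/Open (XPG4.2) semantics for
 * putmsg()/putpmsg() from the kernel.
 */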

int
__xpg4_putmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int flags)
{
	extern int __putmsg(int, const struct strbuf *,
	    const struct strbuf *, int);
	int rv;

	PERFORM(__putmsg(fd, ctlptr, dataptr, flags|MSG_XPG4))
}

int
putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags))
}

int
__xpg4_putpmsg(int fd, const struct strbuf *ctlptr,
    const struct strbuf *dataptr, int band, int flags)
{
	extern int __putpmsg(int, const struct strbuf *,
	    const struct strbuf *, int, int);
	int rv;

	PERFORM(__putpmsg(fd, ctlptr, dataptr, band, flags|MSG_XPG4))
}

int
nanosleep(const timespec_t *rqtp, timespec_t *rmtp)
{
	int error;

	PROLOGUE
	error = abort? EINTR : __nanosleep(rqtp, rmtp);
	EPILOGUE
	if (error) {
		errno = error;
		return (-1);
	}
	return (0);
}

int
clock_nanosleep(clockid_t clock_id, int flags,
    const timespec_t *rqtp, timespec_t *rmtp)
{
	timespec_t reltime;
	hrtime_t start;
	hrtime_t rqlapse;
	hrtime_t lapse;
	int error;

	switch (clock_id) {
	case CLOCK_VIRTUAL:
	case CLOCK_PROCESS_CPUTIME_ID:
	case CLOCK_THREAD_CPUTIME_ID:
		return (ENOTSUP);
	case CLOCK_REALTIME:
	case CLOCK_HIGHRES:
		break;
	default:
		return (EINVAL);
	}
	if (flags & TIMER_ABSTIME) {
		abstime_to_reltime(clock_id, rqtp, &reltime);
		rmtp = NULL;
	} else {
		reltime = *rqtp;
		if (clock_id == CLOCK_HIGHRES)
			start = gethrtime();
	}
restart:
	PROLOGUE
	error = abort? EINTR : __nanosleep(&reltime, rmtp);
	EPILOGUE
	if (error == 0 && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return yet if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.
		 */
		if (flags & TIMER_ABSTIME) {
			if ((hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec > gethrtime()) {
				abstime_to_reltime(clock_id, rqtp, &reltime);
				goto restart;
			}
		} else {
			rqlapse = (hrtime_t)(uint32_t)rqtp->tv_sec * NANOSEC +
			    rqtp->tv_nsec;
			lapse = gethrtime() - start;
			if (rqlapse > lapse) {
				hrt2ts(rqlapse - lapse, &reltime);
				goto restart;
			}
		}
	}
	if (error == 0 && clock_id == CLOCK_REALTIME &&
	    (flags & TIMER_ABSTIME)) {
		/*
		 * Don't return yet just because someone reset the
		 * system clock.  Recompute the new relative time
		 * and reissue the nanosleep() call if necessary.
		 *
		 * Resetting the system clock causes all sorts of
		 * problems and the SUSV3 standards body should
		 * have made the behavior of clock_nanosleep() be
		 * implementation-defined in such a case rather than
		 * being specific about honoring the new system time.
		 * Standards bodies are filled with fools and idiots.
		 */
		abstime_to_reltime(clock_id, rqtp, &reltime);
		if (reltime.tv_sec != 0 || reltime.tv_nsec != 0)
			goto restart;
	}
	return (error);
}
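
/*
 * For illustration, a sketch (not part of this file) of an absolute
 * CLOCK_REALTIME sleep; clock_nanosleep() returns the error number
 * directly rather than setting errno, so the caller loops on EINTR:
 *
 *	timespec_t when;
 *	(void) clock_gettime(CLOCK_REALTIME, &when);
 *	when.tv_sec += 5;
 *	while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME,
 *	    &when, NULL) == EINTR)
 *		continue;
 */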

unsigned int
sleep(unsigned int sec)
{
	unsigned int rem = 0;
	timespec_t ts;
	timespec_t tsr;

	ts.tv_sec = (time_t)sec;
	ts.tv_nsec = 0;
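	/*
	 * If the sleep is interrupted, report the unslept residue
	 * rounded to the nearest second: e.g. a residue of 1.6 sec
	 * (tsr = {1, 600000000}) is reported as 2.
	 */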
	if (nanosleep(&ts, &tsr) == -1 && errno == EINTR) {
		rem = (unsigned int)tsr.tv_sec;
		if (tsr.tv_nsec >= NANOSEC / 2)
			rem++;
	}
	return (rem);
}

int
usleep(useconds_t usec)
{
	timespec_t ts;

	ts.tv_sec = usec / MICROSEC;
	ts.tv_nsec = (long)(usec % MICROSEC) * 1000;
	(void) nanosleep(&ts, NULL);
	return (0);
}

int
close(int fildes)
{
	extern void _aio_close(int);
	extern int __close(int);
	int rv;

	/*
	 * If we call _aio_close() while in a critical region,
	 * we will draw an ASSERT() failure, so don't do it.
	 * No calls to close() from within libc need _aio_close();
	 * only the application's calls to close() need this,
	 * and such calls are never from a libc critical region.
	 */
	if (curthread->ul_critical == 0)
		_aio_close(fildes);
	PERFORM(__close(fildes))
}

int
door_call(int d, door_arg_t *params)
{
	extern int __door_call(int, door_arg_t *);
	int rv;

	PERFORM(__door_call(d, params))
}

int
fcntl(int fildes, int cmd, ...)
{
	extern int __fcntl(int, int, ...);
	intptr_t arg;
	int rv;
	va_list ap;

	va_start(ap, cmd);
	arg = va_arg(ap, intptr_t);
	va_end(ap);
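	/*
	 * Only F_SETLKW can block indefinitely waiting for a record
	 * lock, so it is the only fcntl() command treated as a
	 * cancellation point; everything else goes straight through.
	 */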
	if (cmd != F_SETLKW)
		return (__fcntl(fildes, cmd, arg));
	PERFORM(__fcntl(fildes, cmd, arg))
}

int
fdatasync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, O_DSYNC))
}

int
fsync(int fildes)
{
	extern int __fdsync(int, int);
	int rv;

	PERFORM(__fdsync(fildes, O_SYNC))
}

int
lockf(int fildes, int function, off_t size)
{
	extern int __lockf(int, int, off_t);
	int rv;

	PERFORM(__lockf(fildes, function, size))
}

#if !defined(_LP64)
int
lockf64(int fildes, int function, off64_t size)
{
	extern int __lockf64(int, int, off64_t);
	int rv;

	PERFORM(__lockf64(fildes, function, size))
}
#endif	/* !_LP64 */

ssize_t
msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg)
{
	extern ssize_t __msgrcv(int, void *, size_t, long, int);
	ssize_t rv;

	PERFORM(__msgrcv(msqid, msgp, msgsz, msgtyp, msgflg))
}

int
msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg)
{
	extern int __msgsnd(int, const void *, size_t, int);
	int rv;

	PERFORM(__msgsnd(msqid, msgp, msgsz, msgflg))
}

int
msync(void *addr, size_t len, int flags)
{
	extern int __msync(void *, size_t, int);
	int rv;

	PERFORM(__msync(addr, len, flags))
}

int
openat(int fd, const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat(fd, path, oflag, mode))
}

int
open(const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open(path, oflag, mode))
}

int
creat(const char *path, mode_t mode)
{
	return (open(path, O_WRONLY | O_CREAT | O_TRUNC, mode));
}

#if !defined(_LP64)
int
openat64(int fd, const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__openat64(fd, path, oflag, mode))
}

int
open64(const char *path, int oflag, ...)
{
	mode_t mode;
	int rv;
	va_list ap;

	va_start(ap, oflag);
	mode = va_arg(ap, mode_t);
	va_end(ap);
	PERFORM(__open64(path, oflag, mode))
}

int
creat64(const char *path, mode_t mode)
{
	return (open64(path, O_WRONLY | O_CREAT | O_TRUNC, mode));
}
#endif	/* !_LP64 */

int
pause(void)
{
	extern int __pause(void);
	int rv;

	PERFORM(__pause())
}

ssize_t
pread(int fildes, void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pread(int, void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pread(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pread64(int fildes, void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pread64(int, void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pread64(fildes, buf, nbyte, offset))
}

ssize_t
preadv64(int fildes, const struct iovec *iov, int iovcnt, off64_t offset)
{
	extern ssize_t __preadv64(int, const struct iovec *, int, off_t, off_t);
	ssize_t rv;
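
	/*
	 * In a 32-bit process the 64-bit offset must be split across
	 * two 32-bit system call arguments: low word, then high word.
	 */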
	PERFORM(__preadv64(fildes, iov, iovcnt, offset & 0xffffffffULL,
	    offset>>32))
}
#endif	/* !_LP64 */

ssize_t
preadv(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
{
	extern ssize_t __preadv(int, const struct iovec *, int, off_t, off_t);
	ssize_t rv;

	PERFORM(__preadv(fildes, iov, iovcnt, offset, 0))
}

ssize_t
pwrite(int fildes, const void *buf, size_t nbyte, off_t offset)
{
	extern ssize_t __pwrite(int, const void *, size_t, off_t);
	ssize_t rv;

	PERFORM(__pwrite(fildes, buf, nbyte, offset))
}

#if !defined(_LP64)
ssize_t
pwrite64(int fildes, const void *buf, size_t nbyte, off64_t offset)
{
	extern ssize_t __pwrite64(int, const void *, size_t, off64_t);
	ssize_t rv;

	PERFORM(__pwrite64(fildes, buf, nbyte, offset))
}

ssize_t
pwritev64(int fildes, const struct iovec *iov, int iovcnt, off64_t offset)
{
	extern ssize_t __pwritev64(int,
	    const struct iovec *, int, off_t, off_t);
	ssize_t rv;

	PERFORM(__pwritev64(fildes, iov, iovcnt, offset &
	    0xffffffffULL, offset>>32))
}
#endif	/* !_LP64 */

ssize_t
pwritev(int fildes, const struct iovec *iov, int iovcnt, off_t offset)
{
	extern ssize_t __pwritev(int, const struct iovec *, int, off_t, off_t);
	ssize_t rv;

	PERFORM(__pwritev(fildes, iov, iovcnt, offset, 0))
}

ssize_t
readv(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __readv(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__readv(fildes, iov, iovcnt))
}

int
sigpause(int sig)
{
	extern int __sigpause(int);
	int rv;

	PERFORM(__sigpause(sig))
}

int
sigsuspend(const sigset_t *set)
{
	extern int __sigsuspend(const sigset_t *);
	int rv;

	PROLOGUE_MASK(set)
	rv = __sigsuspend(set);
	EPILOGUE_MASK
	return (rv);
}

int
_pollsys(struct pollfd *fds, nfds_t nfd, const timespec_t *timeout,
    const sigset_t *sigmask)
{
	extern int __pollsys(struct pollfd *, nfds_t, const timespec_t *,
	    const sigset_t *);
	int rv;

	PROLOGUE_MASK(sigmask)
	rv = __pollsys(fds, nfd, timeout, sigmask);
	EPILOGUE_MASK
	return (rv);
}

int
sigtimedwait(const sigset_t *set, siginfo_t *infop, const timespec_t *timeout)
{
	extern int __sigtimedwait(const sigset_t *, siginfo_t *,
	    const timespec_t *);
	siginfo_t info;
	int sig;

	PROLOGUE
	if (abort) {
		*self->ul_errnop = EINTR;
		sig = -1;
	} else {
		sig = __sigtimedwait(set, &info, timeout);
		if (sig == SIGCANCEL &&
		    (SI_FROMKERNEL(&info) || info.si_code == SI_LWP)) {
			do_sigcancel();
			*self->ul_errnop = EINTR;
			sig = -1;
		}
	}
	EPILOGUE
	if (sig != -1 && infop)
		(void) memcpy(infop, &info, sizeof (*infop));
	return (sig);
}

int
sigwait(sigset_t *set)
{
	return (sigtimedwait(set, NULL, NULL));
}

int
sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	return (sigtimedwait(set, info, NULL));
}

int
sigqueue(pid_t pid, int signo, const union sigval value)
{
	extern int __sigqueue(pid_t pid, int signo,
	    /* const union sigval */ void *value, int si_code, int block);
	return (__sigqueue(pid, signo, value.sival_ptr, SI_QUEUE, 0));
}

int
_so_accept(int sock, struct sockaddr *addr, uint_t *addrlen, int version,
    int flags)
{
	extern int __so_accept(int, struct sockaddr *, uint_t *, int, int);
	int rv;

	PERFORM(__so_accept(sock, addr, addrlen, version, flags))
}

int
_so_connect(int sock, struct sockaddr *addr, uint_t addrlen, int version)
{
	extern int __so_connect(int, struct sockaddr *, uint_t, int);
	int rv;

	PERFORM(__so_connect(sock, addr, addrlen, version))
}

int
_so_recv(int sock, void *buf, size_t len, int flags)
{
	extern int __so_recv(int, void *, size_t, int);
	int rv;

	PERFORM(__so_recv(sock, buf, len, flags))
}

int
_so_recvfrom(int sock, void *buf, size_t len, int flags,
    struct sockaddr *addr, int *addrlen)
{
	extern int __so_recvfrom(int, void *, size_t, int,
	    struct sockaddr *, int *);
	int rv;

	PERFORM(__so_recvfrom(sock, buf, len, flags, addr, addrlen))
}

int
_so_recvmsg(int sock, struct msghdr *msg, int flags)
{
	extern int __so_recvmsg(int, struct msghdr *, int);
	int rv;

	PERFORM(__so_recvmsg(sock, msg, flags))
}

int
_so_send(int sock, const void *buf, size_t len, int flags)
{
	extern int __so_send(int, const void *, size_t, int);
	int rv;

	PERFORM(__so_send(sock, buf, len, flags))
}

int
_so_sendmsg(int sock, const struct msghdr *msg, int flags)
{
	extern int __so_sendmsg(int, const struct msghdr *, int);
	int rv;

	PERFORM(__so_sendmsg(sock, msg, flags))
}

int
_so_sendto(int sock, const void *buf, size_t len, int flags,
    const struct sockaddr *addr, int *addrlen)
{
	extern int __so_sendto(int, const void *, size_t, int,
	    const struct sockaddr *, int *);
	int rv;

	PERFORM(__so_sendto(sock, buf, len, flags, addr, addrlen))
}

int
tcdrain(int fildes)
{
	extern int __tcdrain(int);
	int rv;

	PERFORM(__tcdrain(fildes))
}

int
waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options)
{
	extern int __waitid(idtype_t, id_t, siginfo_t *, int);
	int rv;
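
	/*
	 * A WNOHANG wait cannot block, so it need not be (and is not)
	 * treated as a cancellation point.
	 */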
	if (options & WNOHANG)
		return (__waitid(idtype, id, infop, options));
	PERFORM(__waitid(idtype, id, infop, options))
}

ssize_t
writev(int fildes, const struct iovec *iov, int iovcnt)
{
	extern ssize_t __writev(int, const struct iovec *, int);
	ssize_t rv;

	PERFORM(__writev(fildes, iov, iovcnt))
}