 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/
#include <sys/types.h>

#define	FILE_ARY_SZ	8	/* a nice size for FILE array & end_buffer_ptrs */

#ifdef	_LP64

/*
 * Macros to declare and loop over a fp or fp/xfp combo to
 * avoid some of the _LP64 ifdef hell.
 */

#define	FPDECL(fp)		FILE *fp
#define	FIRSTFP(lp, fp)		fp = lp->iobp
#define	NEXTFP(fp)		fp++
#define	FPLOCK(fp)		&fp->_lock
#define	FPSTATE(fp)		&fp->_state
#else

#define	FPDECL(fp)		FILE *fp; xFILE *x##fp
#define	FIRSTFP(lp, fp)		x##fp = lp->iobp; \
				fp = x##fp ? &x##fp->_iob : &_iob[0]
#define	NEXTFP(fp)		(x##fp ? fp = &(++x##fp)->_iob : ++fp)
#define	FPLOCK(fp)		x##fp ? \
				    &x##fp->xlock : &_xftab[IOPIND(fp)]._lock
#define	FPSTATE(fp)		x##fp ? \
				    &x##fp->xstate : &_xftab[IOPIND(fp)]._state
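
/*
 * Illustrative note (not in the original): a typical traversal using the
 * macros above, following the loops in _flushlbf() and _findiop() below.
 * The x##fp variable declared by FPDECL() exists only in ILP32;
 * FIRSTFP()/NEXTFP() hide that difference from the loop body.
 * A minimal sketch:
 *
 *	FPDECL(fp);
 *	struct _link_ *lp = &__first_link;
 *	int i;
 *
 *	do {
 *		FIRSTFP(lp, fp);
 *		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
 *			... inspect fp->_flag, FPLOCK(fp), FPSTATE(fp) ...
 *		}
 *	} while ((lp = lp->next) != NULL);
 */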
/* The extended 32-bit file structure for use in link buffers */
typedef struct xFILE {
	FILE			_iob;	/* must be first! */
	struct xFILEdata	_xdat;
} xFILE;

#define	xmagic			_xdat._magic
#define	xend			_xdat._end
#define	xlock			_xdat._lock
#define	xstate			_xdat._state

#define	FILEx(fp)		((struct xFILE *)(uintptr_t)fp)
/*
 * The magic number stored is actually the pointer scrambled with
 * a magic number.  Pointers to data items live everywhere in memory,
 * so we scramble the pointer in order to avoid accidental collisions.
 */
#define	XFILEMAGIC		0x63687367
#define	XMAGIC(xfp)		((uintptr_t)(xfp) ^ XFILEMAGIC)

#endif	/* _LP64 */
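
/*
 * Illustrative note (not in the original): the scrambled magic lets us
 * ask "was this FILE * handed out by stdio?" without a table lookup.
 * A stdio-allocated xFILE xf satisfies xf.xmagic == XMAGIC(&xf),
 * while a foreign FILE * will almost certainly fail the comparison,
 * since its would-be xmagic word would have to equal its own address
 * XORed with XFILEMAGIC.  See VALIDXFILE() further down, which also adds
 * a page-boundary guard before dereferencing the xmagic word at all.
 */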
struct _link_	/* manages a list of streams */
{
	xFILE		*iobp;	/* the array of (x)FILE's */
				/* NULL for the __first_link in ILP32 */
	int		niob;	/* length of the arrays */
	struct _link_	*next;	/* next in the list */
};

/*
 * With dynamic linking, iob may be in either the library or in the user's
 * a.out, so the run time linker fixes up the first entry in __first_link at
 * process startup time.
 *
 * In 32 bit processes, we don't have xFILE[FILE_ARY_SZ] but FILE[]
 * and _xftab[] instead; this is denoted by having iobp set to NULL in
 * 32 bit mode for the first link entry.
 */
struct _link_ __first_link =	/* first in linked list */
/*
 * Information cached to speed up searches.  We remember where we
 * last found a free FILE * and we remember whether we saw any fcloses
 * in between.  We also count the number of chunks we allocated; see
 * _findiop() for an explanation.
 * These variables are all protected by _first_link_lock.
 */
static struct _link_ *lastlink = NULL;
static int fcloses;
static int nchunks;

static mutex_t _first_link_lock = DEFAULTMUTEX;

static int _fflush_l_iops(void);
static FILE *getiop(FILE *, rmutex_t *, mbstate_t *);

/*
 * All functions that understand the linked list of iob's follow.
 */
#pragma weak _cleanup = __cleanup
__cleanup(void)		/* called at process end to flush output streams */

/*
 * For fork1-safety (see libc_prepare_atfork(), etc).
 */
	(void) mutex_lock(&_first_link_lock);
	/*
	 * XXX: We should acquire all of the iob locks here.
	 */

	/*
	 * XXX: We should release all of the iob locks here.
	 */
	(void) mutex_unlock(&_first_link_lock);

_flushlbf(void)		/* fflush() all line-buffered streams */

	/* Allow compiler to optimize the loop */
	int threaded = __libc_threaded;

	cancel_safe_mutex_lock(&_first_link_lock);
		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			/*
			 * The additional _IONBF check guards against
			 * allocated but uninitialized iops (see _findiop).
			 * We also automatically skip non-allocated iops.
			 * Don't block on locks.
			 */
			if ((fp->_flag & (_IOLBF | _IOWRT | _IONBF)) ==
			    (_IOLBF | _IOWRT)) {
				if (threaded) {
					rmutex_t *lk = FPLOCK(fp);
					if (cancel_safe_mutex_trylock(lk) != 0)
						continue;
					/* Recheck after locking */
					if ((fp->_flag & (_IOLBF | _IOWRT)) ==
					    (_IOLBF | _IOWRT))
						(void) _fflush_u(fp);
					cancel_safe_mutex_unlock(lk);
				} else {
					(void) _fflush_u(fp);
				}
			}
		}
	} while ((lp = lp->next) != NULL);

	cancel_safe_mutex_unlock(&_first_link_lock);
/* allocate an unused stream; NULL if cannot */

	struct _link_ *lp, **prev;

	/* used so there only needs to be one malloc() */
#ifdef	_LP64
	typedef struct {
		struct _link_	hdr;
		FILE	iob[FILE_ARY_SZ];
	} Pkg;
#else
	typedef union {
		struct {				/* Normal */
			struct _link_	hdr;
			xFILE	iob[FILE_ARY_SZ];
		} Pkgn;

		struct {				/* Reversed */
			xFILE	iob[FILE_ARY_SZ];
			struct _link_	hdr;
		} Pkgr;
	} Pkg;
#endif

	int threaded = __libc_threaded;

	cancel_safe_mutex_lock(&_first_link_lock);

	if (lastlink == NULL) {
rescan:
		fcloses = 0;
		lastlink = &__first_link;
	}
	/*
	 * Lock to make the testing of fp->_flag == 0 and the acquiring of
	 * the fp atomic, and for the allocation of new links.
	 * Low contention is expected on _findiop(), hence the coarse
	 * locking.  For finer granularity, use fp->_lock for allocating an
	 * iop and make the testing of lp->next and allocation of a new
	 * link atomic.
	 */

	do {
		prev = &lp->next;
		FIRSTFP(lp, fp);

		for (i = lp->niob; --i >= 0; NEXTFP(fp)) {
			FILE *ret;
			if (threaded) {
				ret = getiop(fp, FPLOCK(fp), FPSTATE(fp));
				if (ret != NULL) {
					cancel_safe_mutex_unlock(
					    &_first_link_lock);
					return (ret);
				}
			} else {
				ret = getiop(fp, NULL, FPSTATE(fp));
				if (ret != NULL)
					return (ret);
			}
		}
	} while ((lastlink = lp = lp->next) != NULL);
	/*
	 * If there was a sufficient number of fcloses since we last started
	 * at __first_link, we rescan all fp's again.  We do not rescan for
	 * all fcloses; that would simplify the algorithm but would make
	 * search times near O(n) again.
	 * Worst case behaviour would still be pretty bad (open a full set,
	 * then continuously opening and closing one FILE * gets you a full
	 * scan each time).  That's why we over-allocate 1 FILE for each
	 * 32 chunks.  More over-allocation is better; this is a nice
	 * empirical value which doesn't cost a lot of memory, doesn't
	 * over-allocate until we reach 256 FILE *s, and keeps the
	 * performance pretty close to the optimum.
	 */
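	/*
	 * Illustrative arithmetic (not in the original): each chunk holds
	 * FILE_ARY_SZ == 8 FILEs, so nchunks/32 first becomes nonzero at
	 * 32 chunks, i.e. 256 FILEs.  Below that size a single fclose()
	 * (fcloses == 1 > 0) already triggers a rescan, so no FILE slot is
	 * wasted; above it, up to nchunks/32 closed slots may sit unused
	 * before we bother to rescan, trading a little memory for O(1)
	 * amortized search time.
	 */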
	if (fcloses > nchunks/32)
		goto rescan;

	/*
	 * Need to allocate another and put it in the linked list.
	 */
	if ((pkgp = malloc(sizeof (Pkg))) == NULL) {
		cancel_safe_mutex_unlock(&_first_link_lock);
		return (NULL);
	}

	(void) memset(pkgp, 0, sizeof (Pkg));

#ifdef	_LP64
	hdr = &pkgp->hdr;
	hdr->iobp = &pkgp->iob[0];
#else
	/*
	 * The problem with referencing a word after a FILE* is the
	 * possibility of a SIGSEGV if a non-stdio-issued FILE structure
	 * ends on a page boundary.  We run this check so we never need to
	 * run an expensive check like mincore() in order to know whether
	 * it is safe to dereference ((xFILE*)fp)->xmagic.
	 * We allocate the block with two alternative layouts; if one
	 * layout is not properly aligned for our purposes, the other
	 * layout will be, because the size of _link_ is small compared to
	 * sizeof (xFILE).
	 *
	 * The check performed is this:
	 *	If the distance from pkgp to the end of the page is
	 *	less than the offset of the last xmagic field in the
	 *	xFILE structure (the 0x1000 boundary is inside our just
	 *	allocated structure) and the distance modulo the size of
	 *	xFILE is identical to the offset of the first xmagic in the
	 *	structure (i.e., XXXXXX000 points to an xmagic field),
	 *	we need to use the reversed structure.
	 */
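	/*
	 * Worked example (not in the original; all numbers hypothetical):
	 * suppose sizeof (struct _link_) == 12, sizeof (xFILE) == 0x90,
	 * and xmagic lives at offset 0x80 within an xFILE, so that
	 * offsetof(Pkg, Pkgn.iob[0].xmagic) == 0x8c.  If malloc() returns
	 * pkgp == 0xXXXXXee4, then delta == 0x1000 - 0xee4 == 0x11c and
	 * delta % 0x90 == 0x8c, so in the Normal layout the page boundary
	 * would land exactly on iob[1].xmagic; the Reversed layout is
	 * chosen instead.  Reversing shifts every xmagic down by
	 * sizeof (struct _link_), which is not a multiple of
	 * sizeof (xFILE), so the boundary no longer hits any xmagic.
	 */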
	if ((delta = 0x1000 - ((uintptr_t)pkgp & 0xfff)) <=
	    offsetof(Pkg, Pkgn.iob[FILE_ARY_SZ-1].xmagic) &&
	    delta % sizeof (struct xFILE) ==
	    offsetof(Pkg, Pkgn.iob[0].xmagic)) {
		/* Use reversed structure */
		hdr = &pkgp->Pkgr.hdr;
		hdr->iobp = &pkgp->Pkgr.iob[0];
	} else {
		/* Use normal structure */
		hdr = &pkgp->Pkgn.hdr;
		hdr->iobp = &pkgp->Pkgn.iob[0];
	}
#endif	/* _LP64 */
	hdr->niob = FILE_ARY_SZ;
	nchunks++;

#ifdef	_LP64
	fp = hdr->iobp;
	for (i = 0; i < FILE_ARY_SZ; i++)
		(void) mutex_init(&fp[i]._lock,
		    USYNC_THREAD | LOCK_RECURSIVE, NULL);
#else
	xfp = hdr->iobp;
	fp = &xfp->_iob;

	for (i = 0; i < FILE_ARY_SZ; i++) {
		xfp[i].xmagic = XMAGIC(&xfp[i]);
		(void) mutex_init(&xfp[i].xlock,
		    USYNC_THREAD | LOCK_RECURSIVE, NULL);
	}
#endif	/* _LP64 */

	lastlink = *prev = hdr;
	fp->_flag = 0377;	/* claim the fp by setting low 8 bits */

	cancel_safe_mutex_unlock(&_first_link_lock);
isseekable(FILE *iop)

	struct stat64 fstatbuf;

	if (fstat64(GET_FD(iop), &fstatbuf) != 0) {
		/*
		 * When we don't know what it is, we'll
		 * do the old behaviour and flush the stream.
		 */

	/*
	 * Check for what is non-SEEKABLE;
	 * otherwise assume it's SEEKABLE, so we get the old
	 * behaviour and flush the stream.
	 */
	if (S_ISFIFO(fstatbuf.st_mode) || S_ISCHR(fstatbuf.st_mode) ||
	    S_ISSOCK(fstatbuf.st_mode) || S_ISDOOR(fstatbuf.st_mode)) {
#ifdef	_LP64
_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */

_realbufend(FILE *iop)		/* get the end pointer for this iop */

#else /* _LP64 */

/*
 * Awkward functions not needed for the sane 64-bit environment.
 */

/*
 * xmagic must not be aligned on a 4K boundary.  We guarantee this in
 * _findiop().
 */
#define	VALIDXFILE(xfp) \
	(((uintptr_t)&(xfp)->xmagic & 0xfff) && \
	    (xfp)->xmagic == XMAGIC(FILEx(xfp)))
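
/*
 * Illustrative note (not in the original): the first clause of
 * VALIDXFILE() is what makes the check safe, not just correct.
 * Consider a foreign (non-stdio) FILE structure that ends exactly at a
 * page boundary: the word where xmagic would live sits on the next,
 * possibly unmapped, page.  For such a pointer,
 * (uintptr_t)&(xfp)->xmagic & 0xfff evaluates to 0, the && short
 * circuits, and the xmagic word is never dereferenced.  Our own xFILEs
 * never have xmagic on a page boundary (guaranteed by the layout check
 * in _findiop() above), so they always reach, and pass, the second
 * clause.
 */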
static struct xFILEdata *
getxfdat(FILE *iop)

	if (STDIOP(iop))
		return (&_xftab[IOPIND(iop)]);
	else if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->_xdat);

_setbufend(FILE *iop, Uchar *end)	/* set the end pointer for this iop */

	struct xFILEdata *dat = getxfdat(iop);

	/*
	 * For binary compatibility with user programs using the
	 * old _bufend macro.  This is *so* broken; fileno()
	 * is not the proper index.
	 */
	if (iop->_magic < _NFILE)
		_bufendtab[iop->_magic] = end;
_realbufend(FILE *iop)		/* get the end pointer for this iop */

	struct xFILEdata *dat = getxfdat(iop);

/*
 * _reallock() is invoked in each stdio call through the IOB_LCK() macro;
 * it is therefore extremely performance sensitive.  We get better
 * performance by inlining the STDIOP check in IOB_LCK and inlining a
 * custom version of getxfdat() here.
 */

	if (VALIDXFILE(FILEx(iop)))
		return (&FILEx(iop)->xlock);

#endif	/* _LP64 */
/* make sure _cnt, _ptr are correct */
_bufsync(FILE *iop, Uchar *bufend)

	spaceleft = bufend - iop->_ptr;
	if (bufend < iop->_ptr) {
		iop->_ptr = bufend;
		iop->_cnt = 0;
	} else if (spaceleft < iop->_cnt)
		iop->_cnt = spaceleft;
/* really write out current buffer contents */

	Uchar *base = iop->_base;

	/*
	 * Hopefully, be stable with respect to interrupts...
	 */
	n = iop->_ptr - base;
	iop->_ptr = base;
	bufend = _bufend(iop);
	if (iop->_flag & (_IOLBF | _IONBF))
		iop->_cnt = 0;		/* always go to a flush */
	else
		iop->_cnt = bufend - base;

	if (_needsync(iop, bufend))	/* recover from interrupts */
		_bufsync(iop, bufend);

	int fd = GET_FD(iop);
	while ((num_wrote = write(fd, base, (size_t)n)) != n) {
		if (num_wrote <= 0) {
			if (!cancel_active())
				iop->_flag |= _IOERR;
			return (EOF);
		}
		n -= num_wrote;
		base += num_wrote;
	}
/* flush (write) buffer */

		res = _fflush_u(iop);

		res = _fflush_l_iops();		/* flush all iops */

static int
_fflush_l_iops(void)		/* flush all buffers */

	/* Allow the compiler to optimize the load out of the loop */
	int threaded = __libc_threaded;

	cancel_safe_mutex_lock(&_first_link_lock);
		/*
		 * We need to grab the file locks or file corruption
		 * will happen.  But we first check the flags field,
		 * knowing that when it is 0, it isn't allocated and
		 * cannot be allocated while we're holding the
		 * _first_link_lock.  And when _IONBF is set (also the
		 * case when _flag is 0377, or alloc in progress), we
		 * also ignore it.
		 *
		 * Ignore locked streams; it will appear as if
		 * concurrent updates happened after fflush(NULL).  Note
		 * that we even attempt to lock if the locking is set to
		 * "by caller".  We don't want to penalize callers of
		 * __fsetlocking() by not flushing their files.  Note: if
		 * __fsetlocking() callers don't employ any locking, they
		 * may still face corruption in fflush(NULL); but that's
		 * no change from earlier releases.
		 */
		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			unsigned int flag = iop->_flag;

			/* flag 0, flag 0377, or _IONBF set */
			if (flag == 0 || (flag & _IONBF) != 0)
				continue;

			lk = FPLOCK(iop);
			if (cancel_safe_mutex_trylock(lk) != 0)
				continue;

			if (!(iop->_flag & _IONBF)) {
				/*
				 * Don't need to worry about the _IORW case,
				 * since the iop will also be marked with
				 * _IOREAD or _IOWRT, whichever we are
				 * really doing.
				 */
				if (iop->_flag & _IOWRT) {
					/* Flush write buffers */
					res |= _fflush_u(iop);
				} else if (iop->_flag & _IOREAD) {
					/*
					 * Flush seekable read buffers;
					 * don't flush non-seekable
					 * read buffers.
					 */
					if (GET_SEEKABLE(iop)) {
						res |= _fflush_u(iop);
					}
				}
			}
			cancel_safe_mutex_unlock(lk);
		}
	} while ((lp = lp->next) != NULL);

	cancel_safe_mutex_unlock(&_first_link_lock);
	/* this portion is always assumed locked */
	if (!(iop->_flag & _IOWRT)) {
		(void) lseek64(GET_FD(iop), -iop->_cnt, SEEK_CUR);
		iop->_cnt = 0;
		/* needed for ungetc & multibyte pushbacks */
		iop->_ptr = iop->_base;
		if (iop->_flag & _IORW) {
			iop->_flag &= ~_IOREAD;
		}

	if (iop->_base != NULL && iop->_ptr > iop->_base) {

	if (iop->_flag & _IORW) {
		iop->_flag &= ~_IOWRT;
/* flush buffer and close stream */

		return (EOF);		/* avoid passing zero to FLOCKFILE */

	if (iop->_flag == 0) {

	/* Is not unbuffered and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) &&
	    (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(GET_FD(iop)) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}

	iop->_flag = 0;			/* marks it as available */

	cancel_safe_mutex_lock(&_first_link_lock);
	fcloses++;
	cancel_safe_mutex_unlock(&_first_link_lock);
/* close all open streams */

	cancel_safe_mutex_lock(&_first_link_lock);

		for (i = lp->niob; --i >= 0; NEXTFP(iop)) {
			/* code stolen from fclose(), above */

			if (iop->_flag == 0) {

			/* Not unbuffered and opened for read and/or write? */
			if (!(iop->_flag & _IONBF) &&
			    (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
				(void) _fflush_u(iop);
			(void) close(GET_FD(iop));
			if (iop->_flag & _IOMYBUF)
				free((char *)iop->_base - PUSHBACK);

			iop->_flag = 0;		/* marks it as available */
		}
	} while ((lp = lp->next) != NULL);

	cancel_safe_mutex_unlock(&_first_link_lock);
/* flush buffer, close fd but keep the stream used by freopen() */

	if (iop == NULL || iop->_flag == 0)
		return (EOF);

	/* Is not unbuffered and opened for read and/or write? */
	if (!(iop->_flag & _IONBF) &&
	    (iop->_flag & (_IOWRT | _IOREAD | _IORW)))
		res = _fflush_u(iop);
	if (close(GET_FD(iop)) < 0)
		res = EOF;
	if (iop->_flag & _IOMYBUF) {
		(void) free((char *)iop->_base - PUSHBACK);
	}

	mb = _getmbstate(iop);
	if (mb != NULL)
		(void) memset(mb, 0, sizeof (mbstate_t));

	_setorientation(iop, _NO_MODE);
static FILE *
getiop(FILE *fp, rmutex_t *lk, mbstate_t *mb)

	if (lk != NULL && cancel_safe_mutex_trylock(lk) != 0)
		return (NULL);		/* locked: fp in use */

	if (fp->_flag == 0) {		/* unused */
		fp->__orientation = 0;

		fp->_flag = 0377;	/* claim the fp by setting low 8 bits */
		(void) memset(mb, 0, sizeof (mbstate_t));
/*
 * This function gets the pointer to the mbstate_t structure associated
 * with the specified iop.
 *
 * If the associated mbstate_t is found, a pointer to it is returned.
 * Otherwise, NULL is returned.
 */
mbstate_t *
_getmbstate(FILE *iop)

	struct xFILEdata *dat = getxfdat(iop);

	if (dat != NULL)
		return (&dat->_state);
/*
 * More 32-bit only functions.
 * They look up/set large fd's for extended FILE support.
 */

/*
 * The negative value indicates that extended fd FILEs have not
 * been enabled by the user.
 */
static int bad_fd = -1;

	/*
	 * Failure indicates a FILE * not allocated through stdio;
	 * it means the flag values are probably bogus and that if
	 * a file descriptor is set, it's in _magic.
	 * Inline getxfdat() for performance reasons.
	 */
	if (STDIOP(iop))
		altfd = _xftab[IOPIND(iop)]._altfd;
	else if (VALIDXFILE(FILEx(iop)))
		altfd = FILEx(iop)->_xdat._altfd;
	else
		return (iop->_magic);
	/*
	 * If this is not an internal extended FILE, check
	 * whether _file is being changed from underneath us,
	 * which it should not be, because then we lose our
	 * ability to guard against silent data corruption.
	 */
	if (!iop->__xf_nocheck && bad_fd > -1 && iop->_magic != bad_fd) {
		(void) fprintf(stderr,
		    "Application violated extended FILE safety mechanism.\n"
		    "Please read the man page for extendedFILE.\nAborting\n");
		abort();
	}
_file_set(FILE *iop, int fd, const char *type)

	struct xFILEdata *dat;
	int Fflag;

	/* Already known to contain at least one byte */
	while (*++type != '\0')
		;

	Fflag = type[-1] == 'F';
	if (!Fflag && bad_fd < 0) {
		errno = EMFILE;
		return (-1);
	}

	dat = getxfdat(iop);
	iop->__extendedfd = 1;
	iop->__xf_nocheck = Fflag;
	dat->_altfd = fd;
	iop->_magic = (unsigned char)bad_fd;
/*
 * Activates extended fd's in FILE's.
 */

static const int tries[] = {196, 120, 60, 3};
#define	NTRIES	(sizeof (tries)/sizeof (int))

enable_extended_FILE_stdio(int fd, int action)

	if (action < 0)
		action = SIGABRT;	/* default signal */
	if (fd < 0) {
		/*
		 * Search for an available fd and make it the bad fd.
		 */
		for (i = 0; i < NTRIES; i++) {
			fd = fcntl(tries[i], F_BADFD, action);
			if (fd >= 0)
				break;
		}
		if (fd < 0)	/* failed to find an available fd */
			return (-1);
	} else {
		/* caller requests that fd be the chosen badfd */
		int nfd = fcntl(fd, F_BADFD, action);
		if (nfd < 0 || nfd != fd)
			return (-1);
	}