usr/src/lib/libc/port/nsl/_utility.c (unleashed/tickless.git)
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
23 /* All Rights Reserved */
26 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
27 * Use is subject to license terms.
30 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
33 #include "mt.h"
34 #include <stdlib.h>
35 #include <string.h>
36 #include <strings.h>
37 #include <unistd.h>
38 #include <errno.h>
39 #include <stropts.h>
40 #include <sys/stream.h>
41 #define _SUN_TPI_VERSION 2
42 #include <sys/tihdr.h>
43 #include <sys/timod.h>
44 #include <sys/stat.h>
45 #include <xti.h>
46 #include <fcntl.h>
47 #include <signal.h>
48 #include <assert.h>
49 #include <syslog.h>
50 #include <limits.h>
51 #include <ucred.h>
52 #include "tx.h"
54 #define DEFSIZE 2048
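/* Default buffer size used by _t_setsize() when a provider reports T_INFINITE */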
57 * The following used to be in tiuser.h, but was causing too much namespace
58 * pollution.
60 #define ROUNDUP32(X) ((X + 0x03)&~0x03)
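/* ROUNDUP32(X) rounds X up to the next multiple of 4 (e.g. ROUNDUP32(5) == 8) to keep TPI offsets 32-bit aligned */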
62 static struct _ti_user *find_tilink(int s);
63 static struct _ti_user *add_tilink(int s);
64 static void _t_free_lookbufs(struct _ti_user *tiptr);
65 static unsigned int _t_setsize(t_scalar_t infosize, boolean_t option);
66 static int _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf);
67 static int _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf);
68 static int _t_adjust_state(int fd, int instate);
69 static int _t_alloc_bufs(int fd, struct _ti_user *tiptr,
70 struct T_info_ack *tsap);
72 mutex_t _ti_userlock = DEFAULTMUTEX; /* Protects hash_bucket[] */
75 * Checkfd - checks validity of file descriptor
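* Returns the cached _ti_user instance for fd, or (re)builds it via
* _t_create() when none is found or force_sync is set; for XTI callers
* this may first push "timod" on the stream. Returns NULL with t_errno
* set on failure.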
77 struct _ti_user *
78 _t_checkfd(int fd, int force_sync, int api_semantics)
80 sigset_t mask;
81 struct _ti_user *tiptr;
82 int retval, timodpushed;
84 if (fd < 0) {
85 t_errno = TBADF;
86 return (NULL);
89 if (!force_sync) {
90 sig_mutex_lock(&_ti_userlock);
91 tiptr = find_tilink(fd);
92 sig_mutex_unlock(&_ti_userlock);
93 if (tiptr != NULL)
94 return (tiptr);
98 * Not found or a forced sync is required.
99 * Check if this is a valid TLI/XTI descriptor.
101 timodpushed = 0;
102 do {
103 retval = ioctl(fd, I_FIND, "timod");
104 } while (retval < 0 && errno == EINTR);
106 if (retval < 0 || (retval == 0 && _T_IS_TLI(api_semantics))) {
108 * not a stream or a TLI endpoint with no timod
109 * XXX Note: If it is a XTI call, we push "timod" and
110 * try to convert it into a transport endpoint later.
111 * We do not do it for TLI and "retain" the old buggy
112 * behavior because ypbind and a lot of other daemons seem
113 * to use a buggy logic test of the form
114 * "(t_getstate(0) != -1 || t_errno != TBADF)" to see if
115 * they were ever invoked with a request on stdin and drop into
116 * untested code. This test is in code generated by rpcgen,
117 * which is why the test is replicated in many daemons too.
118 * We will need to fix that test too with "IsaTLIendpoint"
119 * test if we ever fix this for TLI
121 t_errno = TBADF;
122 return (NULL);
125 if (retval == 0) {
127 * "timod" is not already on the stream, so push it.
129 do {
131 * Assumes (correctly) that I_PUSH is
132 * atomic w.r.t signals (EINTR error)
134 retval = ioctl(fd, I_PUSH, "timod");
135 } while (retval < 0 && errno == EINTR);
137 if (retval < 0) {
138 t_errno = TSYSERR;
139 return (NULL);
141 timodpushed = 1;
144 * Try to (re)constitute the info at user level from state
145 * in the kernel. This could be information that was lost due
146 * to an exec or being instantiated at a new descriptor due
147 * to open(), dup2(), etc.
149 * _t_create() requires that all signals be blocked.
150 * Note that sig_mutex_lock() only defers signals, it does not
151 * block them, so interruptible syscalls could still get EINTR.
153 (void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
154 sig_mutex_lock(&_ti_userlock);
155 tiptr = _t_create(fd, NULL, api_semantics, NULL);
156 if (tiptr == NULL) {
157 int sv_errno = errno;
158 sig_mutex_unlock(&_ti_userlock);
159 (void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
161 * restore the stream to its state before timod was pushed. It may
162 * not have been a network transport stream.
164 if (timodpushed)
165 (void) ioctl(fd, I_POP, 0);
166 errno = sv_errno;
167 return (NULL);
169 sig_mutex_unlock(&_ti_userlock);
170 (void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
171 return (tiptr);
175 * Copy data to the output buffer, making sure the output buffer is 32-bit
176 * aligned, even though the input buffer may not be.
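* Returns 0 on success with *rtn_offset set to the 32-bit aligned offset
* actually used, or -1 if the aligned copy would overflow strbufp->maxlen.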
179 _t_aligned_copy(
180 struct strbuf *strbufp,
181 int len,
182 int init_offset,
183 char *datap,
184 t_scalar_t *rtn_offset)
186 *rtn_offset = ROUNDUP32(init_offset);
187 if ((*rtn_offset + len) > strbufp->maxlen) {
189 * Aligned copy will overflow buffer
191 return (-1);
193 (void) memcpy(strbufp->buf + *rtn_offset, datap, (size_t)len);
195 return (0);
200 * Append data and control info to the look buffer (a list in the MT case).
202 * The only thing that can be in the look buffer is a T_DISCON_IND,
203 * T_ORDREL_IND or a T_UDERROR_IND.
205 * It also enforces priority of T_DISCON_INDs over any T_ORDREL_IND
206 * already in the buffer. It assumes no T_ORDREL_IND is appended
207 * when there is already something on the looklist (error case) and
208 * that a T_ORDREL_IND if present will always be the first on the
209 * list.
211 * This also assumes ti_lock is held via sig_mutex_lock(),
212 * so signals are deferred here.
215 _t_register_lookevent(
216 struct _ti_user *tiptr,
217 caddr_t dptr,
218 int dsize,
219 caddr_t cptr,
220 int csize)
222 struct _ti_lookbufs *tlbs;
223 int cbuf_size, dbuf_size;
225 assert(MUTEX_HELD(&tiptr->ti_lock));
227 cbuf_size = tiptr->ti_ctlsize;
228 dbuf_size = tiptr->ti_rcvsize;
230 if ((csize > cbuf_size) || dsize > dbuf_size) {
231 /* can't fit - return error */
232 return (-1); /* error */
235 * Enforce priority of T_DISCON_IND over T_ORDREL_IND
236 * queued earlier.
237 * Note: Since there can be at most one T_ORDREL_IND
238 * queued (more than one is an error case), and we look for it
239 * on each append of T_DISCON_IND, it can only be at the
240 * head of the list if it is there.
242 if (tiptr->ti_lookcnt > 0) { /* something already on looklist */
243 if (cptr && csize >= (int)sizeof (struct T_discon_ind) &&
244 /* LINTED pointer cast */
245 *(t_scalar_t *)cptr == T_DISCON_IND) {
246 /* appending discon ind */
247 assert(tiptr->ti_servtype != T_CLTS);
248 /* LINTED pointer cast */
249 if (*(t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf ==
250 T_ORDREL_IND) { /* T_ORDREL_IND is on list */
252 * Blow away T_ORDREL_IND
254 _t_free_looklist_head(tiptr);
258 tlbs = &tiptr->ti_lookbufs;
259 if (tiptr->ti_lookcnt > 0) {
260 int listcount = 0;
262 * Allocate and append a new lookbuf to the
263 * existing list. (Should only happen in MT case)
265 while (tlbs->tl_next != NULL) {
266 listcount++;
267 tlbs = tlbs->tl_next;
269 assert(tiptr->ti_lookcnt == listcount);
272 * Since signals are deferred, calls to malloc() are safe.
274 if ((tlbs->tl_next = malloc(sizeof (struct _ti_lookbufs))) ==
275 NULL)
276 return (-1); /* error */
277 tlbs = tlbs->tl_next;
279 * Allocate the buffers. The sizes are derived from the
280 * sizes of other related buffers. See _t_alloc_bufs()
281 * for details.
283 if ((tlbs->tl_lookcbuf = malloc(cbuf_size)) == NULL) {
284 /* giving up - free other memory chunks */
285 free(tlbs);
286 return (-1); /* error */
288 if ((dsize > 0) &&
289 ((tlbs->tl_lookdbuf = malloc(dbuf_size)) == NULL)) {
290 /* giving up - free other memory chunks */
291 free(tlbs->tl_lookcbuf);
292 free(tlbs);
293 return (-1); /* error */
297 (void) memcpy(tlbs->tl_lookcbuf, cptr, csize);
298 if (dsize > 0)
299 (void) memcpy(tlbs->tl_lookdbuf, dptr, dsize);
300 tlbs->tl_lookdlen = dsize;
301 tlbs->tl_lookclen = csize;
302 tlbs->tl_next = NULL;
303 tiptr->ti_lookcnt++;
304 return (0); /* ok return */
308 * Is there something that needs attention?
309 * Assumes tiptr->ti_lock held and this thread's signals blocked
310 * in MT case.
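* Returns 0 if nothing is pending; returns -1 with t_errno set to TLOOK
* when data is on the stream or look-buffer events are queued, or to
* TSYSERR if the I_NREAD ioctl itself fails.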
313 _t_is_event(int fd, struct _ti_user *tiptr)
315 int size, retval;
317 assert(MUTEX_HELD(&tiptr->ti_lock));
318 if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
319 t_errno = TSYSERR;
320 return (-1);
323 if ((retval > 0) || (tiptr->ti_lookcnt > 0)) {
324 t_errno = TLOOK;
325 return (-1);
327 return (0);
331 * wait for T_OK_ACK
332 * assumes tiptr->ti_lock held in MT case
335 _t_is_ok(int fd, struct _ti_user *tiptr, t_scalar_t type)
337 struct strbuf ctlbuf;
338 struct strbuf databuf;
339 union T_primitives *pptr;
340 int retval, cntlflag;
341 int size;
342 int didalloc, didralloc;
343 int flags = 0;
345 assert(MUTEX_HELD(&tiptr->ti_lock));
347 * Acquire ctlbuf for use in sending/receiving control part
348 * of the message.
350 if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0)
351 return (-1);
353 * Acquire databuf for use in sending/receiving data part
355 if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
356 if (didalloc)
357 free(ctlbuf.buf);
358 else
359 tiptr->ti_ctlbuf = ctlbuf.buf;
360 return (-1);
364 * Temporarily convert a non-blocking endpoint to a
365 * blocking one and restore status later
367 cntlflag = fcntl(fd, F_GETFL, 0);
368 if (cntlflag & (O_NDELAY | O_NONBLOCK))
369 (void) fcntl(fd, F_SETFL, cntlflag & ~(O_NDELAY | O_NONBLOCK));
371 flags = RS_HIPRI;
373 while ((retval = getmsg(fd, &ctlbuf, &databuf, &flags)) < 0) {
374 if (errno == EINTR)
375 continue;
376 if (cntlflag & (O_NDELAY | O_NONBLOCK))
377 (void) fcntl(fd, F_SETFL, cntlflag);
378 t_errno = TSYSERR;
379 goto err_out;
382 /* did we get the entire message? */
383 if (retval > 0) {
384 if (cntlflag & (O_NDELAY | O_NONBLOCK))
385 (void) fcntl(fd, F_SETFL, cntlflag);
386 t_errno = TSYSERR;
387 errno = EIO;
388 goto err_out;
392 * is ctl part large enough to determine type?
394 if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
395 if (cntlflag & (O_NDELAY | O_NONBLOCK))
396 (void) fcntl(fd, F_SETFL, cntlflag);
397 t_errno = TSYSERR;
398 errno = EPROTO;
399 goto err_out;
402 if (cntlflag & (O_NDELAY | O_NONBLOCK))
403 (void) fcntl(fd, F_SETFL, cntlflag);
405 /* LINTED pointer cast */
406 pptr = (union T_primitives *)ctlbuf.buf;
408 switch (pptr->type) {
409 case T_OK_ACK:
410 if ((ctlbuf.len < (int)sizeof (struct T_ok_ack)) ||
411 (pptr->ok_ack.CORRECT_prim != type)) {
412 t_errno = TSYSERR;
413 errno = EPROTO;
414 goto err_out;
416 if (didalloc)
417 free(ctlbuf.buf);
418 else
419 tiptr->ti_ctlbuf = ctlbuf.buf;
420 if (didralloc)
421 free(databuf.buf);
422 else
423 tiptr->ti_rcvbuf = databuf.buf;
424 return (0);
426 case T_ERROR_ACK:
427 if ((ctlbuf.len < (int)sizeof (struct T_error_ack)) ||
428 (pptr->error_ack.ERROR_prim != type)) {
429 t_errno = TSYSERR;
430 errno = EPROTO;
431 goto err_out;
434 * If the error is out of state and there is something
435 * on the read queue, then indicate to the user that
436 * there is something that needs attention.
438 if (pptr->error_ack.TLI_error == TOUTSTATE) {
439 if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
440 t_errno = TSYSERR;
441 goto err_out;
443 if (retval > 0)
444 t_errno = TLOOK;
445 else
446 t_errno = TOUTSTATE;
447 } else {
448 t_errno = pptr->error_ack.TLI_error;
449 if (t_errno == TSYSERR)
450 errno = pptr->error_ack.UNIX_error;
452 goto err_out;
453 default:
454 t_errno = TSYSERR;
455 errno = EPROTO;
456 /* fallthru to err_out: */
458 err_out:
459 if (didalloc)
460 free(ctlbuf.buf);
461 else
462 tiptr->ti_ctlbuf = ctlbuf.buf;
463 if (didralloc)
464 free(databuf.buf);
465 else
466 tiptr->ti_rcvbuf = databuf.buf;
467 return (-1);
471 * timod ioctl
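* Wrapper around the I_STR ioctl. A positive return value from timod
* encodes the TLI error in its low byte and, for TSYSERR, the UNIX errno
* in the next byte; both are decoded here before returning -1.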
474 _t_do_ioctl(int fd, char *buf, int size, int cmd, int *retlenp)
476 int retval;
477 struct strioctl strioc;
479 strioc.ic_cmd = cmd;
480 strioc.ic_timout = -1;
481 strioc.ic_len = size;
482 strioc.ic_dp = buf;
484 if ((retval = ioctl(fd, I_STR, &strioc)) < 0) {
485 t_errno = TSYSERR;
486 return (-1);
489 if (retval > 0) {
490 t_errno = retval & 0xff;
491 if (t_errno == TSYSERR)
492 errno = (retval >> 8) & 0xff;
493 return (-1);
495 if (retlenp)
496 *retlenp = strioc.ic_len;
497 return (0);
501 * alloc scratch buffers and look buffers
503 /* ARGSUSED */
504 static int
505 _t_alloc_bufs(int fd, struct _ti_user *tiptr, struct T_info_ack *tsap)
507 unsigned int size1, size2;
508 t_scalar_t optsize;
509 unsigned int csize, dsize, asize, osize;
510 char *ctlbuf, *rcvbuf;
511 char *lookdbuf, *lookcbuf;
513 csize = _t_setsize(tsap->CDATA_size, B_FALSE);
514 dsize = _t_setsize(tsap->DDATA_size, B_FALSE);
516 size1 = _T_MAX(csize, dsize);
518 if (size1 != 0) {
519 if ((rcvbuf = malloc(size1)) == NULL)
520 return (-1);
521 if ((lookdbuf = malloc(size1)) == NULL) {
522 free(rcvbuf);
523 return (-1);
525 } else {
526 rcvbuf = NULL;
527 lookdbuf = NULL;
530 asize = _t_setsize(tsap->ADDR_size, B_FALSE);
531 if (tsap->OPT_size >= 0)
532 /* compensate for XTI level options */
533 optsize = tsap->OPT_size + TX_XTI_LEVEL_MAX_OPTBUF;
534 else
535 optsize = tsap->OPT_size;
536 osize = _t_setsize(optsize, B_TRUE);
539 * We compute the largest buffer size needed for this provider by
540 * adding the components. [An extra sizeof (t_scalar_t) is added to
541 * take care of rounding off for alignment for each buffer.]
542 * The goal here is to compute the size of the largest possible buffer that
543 * might be needed to hold a TPI message for the transport provider
544 * on this endpoint.
545 * Note: T_ADDR_ACK contains potentially two address buffers.
548 size2 = (unsigned int)sizeof (union T_primitives) /* TPI struct */
549 + asize + (unsigned int)sizeof (t_scalar_t) +
550 /* first addr buffer plus alignment */
551 asize + (unsigned int)sizeof (t_scalar_t) +
552 /* second addr buffer plus alignment */
553 osize + (unsigned int)sizeof (t_scalar_t);
554 /* option buffer plus alignment */
556 if ((ctlbuf = malloc(size2)) == NULL) {
557 if (size1 != 0) {
558 free(rcvbuf);
559 free(lookdbuf);
561 return (-1);
564 if ((lookcbuf = malloc(size2)) == NULL) {
565 if (size1 != 0) {
566 free(rcvbuf);
567 free(lookdbuf);
569 free(ctlbuf);
570 return (-1);
573 tiptr->ti_rcvsize = size1;
574 tiptr->ti_rcvbuf = rcvbuf;
575 tiptr->ti_ctlsize = size2;
576 tiptr->ti_ctlbuf = ctlbuf;
579 * Note: The head of the lookbuffers list (and associated buffers)
580 * is allocated here on initialization.
581 * More are allocated on demand.
583 tiptr->ti_lookbufs.tl_lookclen = 0;
584 tiptr->ti_lookbufs.tl_lookcbuf = lookcbuf;
585 tiptr->ti_lookbufs.tl_lookdlen = 0;
586 tiptr->ti_lookbufs.tl_lookdbuf = lookdbuf;
588 return (0);
593 * set sizes of buffers
595 static unsigned int
596 _t_setsize(t_scalar_t infosize, boolean_t option)
598 static size_t optinfsize;
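/*
 * optinfsize caches the option-buffer size chosen for T_INFINITE; it is
 * computed once from ucred_size() and reused on subsequent calls.
 */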
600 switch (infosize) {
601 case T_INFINITE /* -1 */:
602 if (option) {
603 if (optinfsize == 0) {
604 size_t uc = ucred_size();
605 if (uc < DEFSIZE/2)
606 optinfsize = DEFSIZE;
607 else
608 optinfsize = ucred_size() + DEFSIZE/2;
610 return ((unsigned int)optinfsize);
612 return (DEFSIZE);
613 case T_INVALID /* -2 */:
614 return (0);
615 default:
616 return ((unsigned int) infosize);
620 static void
621 _t_reinit_tiptr(struct _ti_user *tiptr)
624 * Note: This routine is designed for a "reinitialization".
625 * The following fields are not modified here and are preserved:
626 * - ti_fd field
627 * - ti_lock
628 * - ti_next
629 * - ti_prev
630 * The above fields have to be separately initialized if this
631 * is used for a fresh initialization.
634 tiptr->ti_flags = 0;
635 tiptr->ti_rcvsize = 0;
636 tiptr->ti_rcvbuf = NULL;
637 tiptr->ti_ctlsize = 0;
638 tiptr->ti_ctlbuf = NULL;
639 tiptr->ti_lookbufs.tl_lookdbuf = NULL;
640 tiptr->ti_lookbufs.tl_lookcbuf = NULL;
641 tiptr->ti_lookbufs.tl_lookdlen = 0;
642 tiptr->ti_lookbufs.tl_lookclen = 0;
643 tiptr->ti_lookbufs.tl_next = NULL;
644 tiptr->ti_maxpsz = 0;
645 tiptr->ti_tsdusize = 0;
646 tiptr->ti_etsdusize = 0;
647 tiptr->ti_cdatasize = 0;
648 tiptr->ti_ddatasize = 0;
649 tiptr->ti_servtype = 0;
650 tiptr->ti_lookcnt = 0;
651 tiptr->ti_state = 0;
652 tiptr->ti_ocnt = 0;
653 tiptr->ti_prov_flag = 0;
654 tiptr->ti_qlen = 0;
658 * Link manipulation routines.
660 * NBUCKETS hash buckets are used to give fast
661 * access. The number is derived from the file descriptor softlimit
662 * number (64).
665 #define NBUCKETS 64
666 static struct _ti_user *hash_bucket[NBUCKETS];
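/*
 * A descriptor s maps to hash_bucket[s % NBUCKETS]; collisions within a
 * bucket are chained through the ti_next/ti_prev links.
 */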
669 * Allocates a new link and returns a pointer to it.
670 * Assumes that the caller is holding _ti_userlock via sig_mutex_lock(),
671 * so signals are deferred here.
673 static struct _ti_user *
674 add_tilink(int s)
676 struct _ti_user *tiptr;
677 struct _ti_user *prevptr;
678 struct _ti_user *curptr;
679 int x;
680 struct stat stbuf;
682 assert(MUTEX_HELD(&_ti_userlock));
684 if (s < 0 || fstat(s, &stbuf) != 0)
685 return (NULL);
687 x = s % NBUCKETS;
688 if (hash_bucket[x] != NULL) {
690 * Walk along the bucket looking for
691 * duplicate entry or the end.
693 for (curptr = hash_bucket[x]; curptr != NULL;
694 curptr = curptr->ti_next) {
695 if (curptr->ti_fd == s) {
697 * This can happen when the user has close(2)'ed
698 * a descriptor that has then been allocated again
699 * via t_open().
701 * We will re-use the existing _ti_user struct
702 * in this case rather than using the one
703 * we allocated above. If there are buffers
704 * associated with the existing _ti_user
705 * struct, they may not be the correct size,
706 * so we cannot use them. We free them
707 * here and re-allocate new ones
708 * later on.
710 free(curptr->ti_rcvbuf);
711 free(curptr->ti_ctlbuf);
712 _t_free_lookbufs(curptr);
713 _t_reinit_tiptr(curptr);
714 curptr->ti_rdev = stbuf.st_rdev;
715 curptr->ti_ino = stbuf.st_ino;
716 return (curptr);
718 prevptr = curptr;
721 * Allocate and link in a new one.
723 if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
724 return (NULL);
726 * First initialize the fields common with reinitialization,
727 * then the remaining fields.
729 _t_reinit_tiptr(tiptr);
730 prevptr->ti_next = tiptr;
731 tiptr->ti_prev = prevptr;
732 } else {
734 * First entry.
736 if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
737 return (NULL);
738 _t_reinit_tiptr(tiptr);
739 hash_bucket[x] = tiptr;
740 tiptr->ti_prev = NULL;
742 tiptr->ti_next = NULL;
743 tiptr->ti_fd = s;
744 tiptr->ti_rdev = stbuf.st_rdev;
745 tiptr->ti_ino = stbuf.st_ino;
746 (void) mutex_init(&tiptr->ti_lock, USYNC_THREAD, NULL);
747 return (tiptr);
751 * Find a link by descriptor
752 * Assumes that the caller is holding _ti_userlock.
754 static struct _ti_user *
755 find_tilink(int s)
757 struct _ti_user *curptr;
758 int x;
759 struct stat stbuf;
761 assert(MUTEX_HELD(&_ti_userlock));
763 if (s < 0 || fstat(s, &stbuf) != 0)
764 return (NULL);
766 x = s % NBUCKETS;
768 * Walk along the bucket looking for the descriptor.
770 for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
771 if (curptr->ti_fd == s) {
772 if (curptr->ti_rdev == stbuf.st_rdev &&
773 curptr->ti_ino == stbuf.st_ino)
774 return (curptr);
775 (void) _t_delete_tilink(s);
778 return (NULL);
782 * Assumes that the caller is holding _ti_userlock.
783 * Also assumes that all signals are blocked.
786 _t_delete_tilink(int s)
788 struct _ti_user *curptr;
789 int x;
792 * Find the link.
794 assert(MUTEX_HELD(&_ti_userlock));
795 if (s < 0)
796 return (-1);
797 x = s % NBUCKETS;
799 * Walk along the bucket looking for
800 * the descriptor.
802 for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
803 if (curptr->ti_fd == s) {
804 struct _ti_user *nextptr;
805 struct _ti_user *prevptr;
807 nextptr = curptr->ti_next;
808 prevptr = curptr->ti_prev;
809 if (prevptr)
810 prevptr->ti_next = nextptr;
811 else
812 hash_bucket[x] = nextptr;
813 if (nextptr)
814 nextptr->ti_prev = prevptr;
817 * free the resources associated with curptr
819 free(curptr->ti_rcvbuf);
820 free(curptr->ti_ctlbuf);
821 _t_free_lookbufs(curptr);
822 (void) mutex_destroy(&curptr->ti_lock);
823 free(curptr);
824 return (0);
827 return (-1);
831 * Allocate a TLI state structure and synch it with the kernel
832 * *tiptr is returned
833 * Assumes that the caller is holding the _ti_userlock and has blocked signals.
835 * This function may fail the first time it is called with a given transport if it
836 * doesn't support the T_CAPABILITY_REQ TPI message.
838 struct _ti_user *
839 _t_create(int fd, struct t_info *info, int api_semantics, int *t_capreq_failed)
842 * Aligned data buffer for ioctl.
844 union {
845 struct ti_sync_req ti_req;
846 struct ti_sync_ack ti_ack;
847 union T_primitives t_prim;
848 char pad[128];
849 } ioctl_data;
850 void *ioctlbuf = &ioctl_data; /* TI_SYNC/GETINFO with room to grow */
851 /* preferred location first local variable */
852 /* see note below */
854 * Note: We use "ioctlbuf" allocated on stack above with
855 * room to grow since (struct ti_sync_ack) can grow in size
856 * on future kernels. (We do not use malloc'd "ti_ctlbuf" as that is
857 * part of the instance structure, which may not exist yet.)
858 * Its preferred declaration location is first local variable in this
859 * procedure as bugs causing overruns will be detectable on
860 * platforms where procedure calling conventions place return
861 * address on stack (such as x86) instead of causing silent
862 * memory corruption.
864 struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
865 struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
866 struct T_capability_req *tcrp = (struct T_capability_req *)ioctlbuf;
867 struct T_capability_ack *tcap = (struct T_capability_ack *)ioctlbuf;
868 struct T_info_ack *tiap = &tcap->INFO_ack;
869 struct _ti_user *ntiptr;
870 int expected_acksize;
871 int retlen, rstate, sv_errno, rval;
873 assert(MUTEX_HELD(&_ti_userlock));
876 * Use the ioctls required for syncing state with the kernel.
877 * We use two ioctls. TI_CAPABILITY is used to get TPI information and
878 * TI_SYNC is used to synchronise state with timod. Statically linked
879 * TLI applications will no longer work on older releases where there
880 * are no TI_SYNC and TI_CAPABILITY.
884 * Request info about transport.
885 * Assumes that TC1_INFO should always be implemented.
886 * For TI_CAPABILITY size argument to ioctl specifies maximum buffer
887 * size.
889 tcrp->PRIM_type = T_CAPABILITY_REQ;
890 tcrp->CAP_bits1 = TC1_INFO | TC1_ACCEPTOR_ID;
891 rval = _t_do_ioctl(fd, (char *)ioctlbuf,
892 (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
893 expected_acksize = (int)sizeof (struct T_capability_ack);
895 if (rval < 0) {
897 * TI_CAPABILITY may fail when the transport provider doesn't
898 * support the T_CAPABILITY_REQ message type. In this case the file
899 * descriptor may be unusable (when the transport provider sent
900 * M_ERROR in response to T_CAPABILITY_REQ). This should only
901 * happen once during system lifetime for a given transport
902 * provider since timod will emulate TI_CAPABILITY after it
903 * detects the failure.
905 if (t_capreq_failed != NULL)
906 *t_capreq_failed = 1;
907 return (NULL);
910 if (retlen != expected_acksize) {
911 t_errno = TSYSERR;
912 errno = EIO;
913 return (NULL);
916 if ((tcap->CAP_bits1 & TC1_INFO) == 0) {
917 t_errno = TSYSERR;
918 errno = EPROTO;
919 return (NULL);
921 if (info != NULL) {
922 if (tiap->PRIM_type != T_INFO_ACK) {
923 t_errno = TSYSERR;
924 errno = EPROTO;
925 return (NULL);
927 info->addr = tiap->ADDR_size;
928 info->options = tiap->OPT_size;
929 info->tsdu = tiap->TSDU_size;
930 info->etsdu = tiap->ETSDU_size;
931 info->connect = tiap->CDATA_size;
932 info->discon = tiap->DDATA_size;
933 info->servtype = tiap->SERV_type;
934 if (_T_IS_XTI(api_semantics)) {
936 * XTI ONLY - TLI "struct t_info" does not
937 * have "flags"
939 info->flags = 0;
940 if (tiap->PROVIDER_flag & (SENDZERO|OLD_SENDZERO))
941 info->flags |= T_SENDZERO;
943 * Some day there MAY be a NEW bit in T_info_ack
944 * PROVIDER_flag namespace exposed by TPI header
945 * <sys/tihdr.h> which will functionally correspond to
946 * role played by T_ORDRELDATA in info->flags namespace
947 * When that bit exists, we can add a test to see if
948 * it is set and set T_ORDRELDATA.
949 * Note: Currently only mOSI ("minimal OSI") provider
950 * is specified to use T_ORDRELDATA so probability of
951 * needing it is minimal.
957 * If this is the first time or there is no instance (after fork/exec,
958 * dup, etc.), then create and initialize the data structure
959 * and allocate buffers.
961 ntiptr = add_tilink(fd);
962 if (ntiptr == NULL) {
963 t_errno = TSYSERR;
964 errno = ENOMEM;
965 return (NULL);
969 * Allocate buffers for the new descriptor
971 if (_t_alloc_bufs(fd, ntiptr, tiap) < 0) {
972 sv_errno = errno;
973 (void) _t_delete_tilink(fd);
974 t_errno = TSYSERR;
975 errno = sv_errno;
976 return (NULL);
979 /* Fill instance structure */
981 ntiptr->ti_lookcnt = 0;
982 ntiptr->ti_flags = USED;
983 ntiptr->ti_state = T_UNINIT;
984 ntiptr->ti_ocnt = 0;
986 assert(tiap->TIDU_size > 0);
987 ntiptr->ti_maxpsz = tiap->TIDU_size;
988 assert(tiap->TSDU_size >= -2);
989 ntiptr->ti_tsdusize = tiap->TSDU_size;
990 assert(tiap->ETSDU_size >= -2);
991 ntiptr->ti_etsdusize = tiap->ETSDU_size;
992 assert(tiap->CDATA_size >= -2);
993 ntiptr->ti_cdatasize = tiap->CDATA_size;
994 assert(tiap->DDATA_size >= -2);
995 ntiptr->ti_ddatasize = tiap->DDATA_size;
996 ntiptr->ti_servtype = tiap->SERV_type;
997 ntiptr->ti_prov_flag = tiap->PROVIDER_flag;
999 if ((tcap->CAP_bits1 & TC1_ACCEPTOR_ID) != 0) {
1000 ntiptr->acceptor_id = tcap->ACCEPTOR_id;
1001 ntiptr->ti_flags |= V_ACCEPTOR_ID;
1003 else
1004 ntiptr->ti_flags &= ~V_ACCEPTOR_ID;
1007 * Restore state from kernel (caveat some heuristics)
1009 switch (tiap->CURRENT_state) {
1011 case TS_UNBND:
1012 ntiptr->ti_state = T_UNBND;
1013 break;
1015 case TS_IDLE:
1016 if ((rstate = _t_adjust_state(fd, T_IDLE)) < 0) {
1017 sv_errno = errno;
1018 (void) _t_delete_tilink(fd);
1019 errno = sv_errno;
1020 return (NULL);
1022 ntiptr->ti_state = rstate;
1023 break;
1025 case TS_WRES_CIND:
1026 ntiptr->ti_state = T_INCON;
1027 break;
1029 case TS_WCON_CREQ:
1030 ntiptr->ti_state = T_OUTCON;
1031 break;
1033 case TS_DATA_XFER:
1034 if ((rstate = _t_adjust_state(fd, T_DATAXFER)) < 0) {
1035 sv_errno = errno;
1036 (void) _t_delete_tilink(fd);
1037 errno = sv_errno;
1038 return (NULL);
1040 ntiptr->ti_state = rstate;
1041 break;
1043 case TS_WIND_ORDREL:
1044 ntiptr->ti_state = T_OUTREL;
1045 break;
1047 case TS_WREQ_ORDREL:
1048 if ((rstate = _t_adjust_state(fd, T_INREL)) < 0) {
1049 sv_errno = errno;
1050 (void) _t_delete_tilink(fd);
1051 errno = sv_errno;
1052 return (NULL);
1054 ntiptr->ti_state = rstate;
1055 break;
1056 default:
1057 t_errno = TSTATECHNG;
1058 (void) _t_delete_tilink(fd);
1059 return (NULL);
1063 * Sync information with timod.
1065 tsrp->tsr_flags = TSRF_QLEN_REQ;
1067 rval = _t_do_ioctl(fd, ioctlbuf,
1068 (int)sizeof (struct ti_sync_req), TI_SYNC, &retlen);
1069 expected_acksize = (int)sizeof (struct ti_sync_ack);
1071 if (rval < 0) {
1072 sv_errno = errno;
1073 (void) _t_delete_tilink(fd);
1074 t_errno = TSYSERR;
1075 errno = sv_errno;
1076 return (NULL);
1080 * This is a "less than" check as "struct ti_sync_ack" returned by
1081 * TI_SYNC can grow in size in future kernels. If/when a statically
1082 * linked application is run on a future kernel, it should not fail.
1084 if (retlen < expected_acksize) {
1085 sv_errno = errno;
1086 (void) _t_delete_tilink(fd);
1087 t_errno = TSYSERR;
1088 errno = sv_errno;
1089 return (NULL);
1092 if (_T_IS_TLI(api_semantics))
1093 tsap->tsa_qlen = 0; /* not needed for TLI */
1095 ntiptr->ti_qlen = tsap->tsa_qlen;
1097 return (ntiptr);
1101 static int
1102 _t_adjust_state(int fd, int instate)
1104 char ctlbuf[sizeof (t_scalar_t)];
1105 char databuf[sizeof (int)]; /* size unimportant - anything > 0 */
1106 struct strpeek arg;
1107 int outstate, retval;
1110 * Peek at message on stream head (if any)
1111 * and see if it is data
1113 arg.ctlbuf.buf = ctlbuf;
1114 arg.ctlbuf.maxlen = (int)sizeof (ctlbuf);
1115 arg.ctlbuf.len = 0;
1117 arg.databuf.buf = databuf;
1118 arg.databuf.maxlen = (int)sizeof (databuf);
1119 arg.databuf.len = 0;
1121 arg.flags = 0;
1123 if ((retval = ioctl(fd, I_PEEK, &arg)) < 0) {
1124 t_errno = TSYSERR;
1125 return (-1);
1127 outstate = instate;
1129 * If peek shows something at the stream head, then
1130 * adjust "outstate" based on some heuristics.
1132 if (retval > 0) {
1133 switch (instate) {
1134 case T_IDLE:
1136 * The following heuristic is to handle data
1137 * ahead of T_DISCON_IND indications that might
1138 * be at the stream head waiting to be
1139 * read (T_DATA_IND or M_DATA)
1141 if (((arg.ctlbuf.len == 4) &&
1142 /* LINTED pointer cast */
1143 ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
1144 ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
1145 outstate = T_DATAXFER;
1147 break;
1148 case T_DATAXFER:
1150 * The following heuristic is to handle
1151 * the case where the connection is established
1152 * and in data transfer state at the provider
1153 * but the T_CONN_CON has not yet been read
1154 * from the stream head.
1156 if ((arg.ctlbuf.len == 4) &&
1157 /* LINTED pointer cast */
1158 ((*(int32_t *)arg.ctlbuf.buf) == T_CONN_CON))
1159 outstate = T_OUTCON;
1160 break;
1161 case T_INREL:
1163 * The following heuristic is to handle data
1164 * ahead of T_ORDREL_IND indications that might
1165 * be at the stream head waiting to be
1166 * read (T_DATA_IND or M_DATA)
1168 if (((arg.ctlbuf.len == 4) &&
1169 /* LINTED pointer cast */
1170 ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
1171 ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
1172 outstate = T_DATAXFER;
1174 break;
1175 default:
1176 break;
1179 return (outstate);
1183 * Assumes caller has blocked signals at least in this thread (for safe
1184 * malloc/free operations)
1186 static int
1187 _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf)
1189 unsigned size2;
1191 assert(MUTEX_HELD(&tiptr->ti_lock));
1192 size2 = tiptr->ti_ctlsize; /* same size as default ctlbuf */
1194 if ((*retbuf = malloc(size2)) == NULL) {
1195 return (-1);
1197 return (size2);
1202 * Assumes caller has blocked signals at least in this thread (for safe
1203 * malloc/free operations)
1206 _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf)
1208 unsigned size1;
1210 assert(MUTEX_HELD(&tiptr->ti_lock));
1211 size1 = tiptr->ti_rcvsize; /* same size as default rcvbuf */
1213 if ((*retbuf = malloc(size1)) == NULL) {
1214 return (-1);
1216 return (size1);
1220 * Free lookbuffer structures and associated resources
1221 * Assumes ti_lock held for MT case.
1223 static void
1224 _t_free_lookbufs(struct _ti_user *tiptr)
1226 struct _ti_lookbufs *tlbs, *prev_tlbs, *head_tlbs;
1229 * Assertion:
1230 * The structure lock should be held or the global list
1231 * manipulation lock. The assumption is that nothing
1232 * else can access the descriptor since global list manipulation
1233 * lock is held so it is OK to manipulate fields without the
1234 * structure lock
1236 assert(MUTEX_HELD(&tiptr->ti_lock) || MUTEX_HELD(&_ti_userlock));
1239 * Free only the buffers in the first lookbuf
1241 head_tlbs = &tiptr->ti_lookbufs;
1242 if (head_tlbs->tl_lookdbuf != NULL) {
1243 free(head_tlbs->tl_lookdbuf);
1244 head_tlbs->tl_lookdbuf = NULL;
1246 free(head_tlbs->tl_lookcbuf);
1247 head_tlbs->tl_lookcbuf = NULL;
1249 * Free the node and the buffers in the rest of the
1250 * list
1253 tlbs = head_tlbs->tl_next;
1254 head_tlbs->tl_next = NULL;
1256 while (tlbs != NULL) {
1257 free(tlbs->tl_lookdbuf);
1258 free(tlbs->tl_lookcbuf);
1259 prev_tlbs = tlbs;
1260 tlbs = tlbs->tl_next;
1261 free(prev_tlbs);
1266 * Free lookbuffer event list head.
1267 * Consume current lookbuffer event
1268 * Assumes ti_lock held for MT case.
1269 * Note: The head of this list is part of the instance
1270 * structure so the code is a little unorthodox.
1272 void
1273 _t_free_looklist_head(struct _ti_user *tiptr)
1275 struct _ti_lookbufs *tlbs, *next_tlbs;
1277 tlbs = &tiptr->ti_lookbufs;
1279 if (tlbs->tl_next) {
1281 * Free the control and data buffers
1283 free(tlbs->tl_lookdbuf);
1284 free(tlbs->tl_lookcbuf);
1286 * Replace with next lookbuf event contents
1288 next_tlbs = tlbs->tl_next;
1289 tlbs->tl_next = next_tlbs->tl_next;
1290 tlbs->tl_lookcbuf = next_tlbs->tl_lookcbuf;
1291 tlbs->tl_lookclen = next_tlbs->tl_lookclen;
1292 tlbs->tl_lookdbuf = next_tlbs->tl_lookdbuf;
1293 tlbs->tl_lookdlen = next_tlbs->tl_lookdlen;
1294 free(next_tlbs);
1296 * Decrement the count - it should never get to zero
1297 * in this path.
1299 tiptr->ti_lookcnt--;
1300 assert(tiptr->ti_lookcnt > 0);
1301 } else {
1303 * No more look buffer events - just clear the flag
1304 * and leave the buffers alone
1306 assert(tiptr->ti_lookcnt == 1);
1307 tiptr->ti_lookcnt = 0;
1312 * Discard lookbuffer events.
1313 * Assumes ti_lock held for MT case.
1315 void
1316 _t_flush_lookevents(struct _ti_user *tiptr)
1318 struct _ti_lookbufs *tlbs, *prev_tlbs;
1321 * Leave the first node's buffers alone (i.e. allocated)
1322 * but reset the flag.
1324 assert(MUTEX_HELD(&tiptr->ti_lock));
1325 tiptr->ti_lookcnt = 0;
1327 * Blow away the rest of the list
1329 tlbs = tiptr->ti_lookbufs.tl_next;
1330 tiptr->ti_lookbufs.tl_next = NULL;
1331 while (tlbs != NULL) {
1332 free(tlbs->tl_lookdbuf);
1333 free(tlbs->tl_lookcbuf);
1334 prev_tlbs = tlbs;
1335 tlbs = tlbs->tl_next;
1336 free(prev_tlbs);
1342 * This routine checks if the control buffer in the instance structure
1343 * is available (non-null). If it is, the buffer is acquired and marked busy
1344 * (null). If it is busy (possible in MT programs), it allocates a new
1345 * buffer and sets a flag indicating new memory was allocated and the caller
1346 * has to free it.
1349 _t_acquire_ctlbuf(
1350 struct _ti_user *tiptr,
1351 struct strbuf *ctlbufp,
1352 int *didallocp)
1354 *didallocp = 0;
1356 ctlbufp->len = 0;
1357 if (tiptr->ti_ctlbuf) {
1358 ctlbufp->buf = tiptr->ti_ctlbuf;
1359 tiptr->ti_ctlbuf = NULL;
1360 ctlbufp->maxlen = tiptr->ti_ctlsize;
1361 } else {
1363 * tiptr->ti_ctlbuf is in use;
1364 * allocate a new buffer and free it after use.
1366 if ((ctlbufp->maxlen = _t_cbuf_alloc(tiptr,
1367 &ctlbufp->buf)) < 0) {
1368 t_errno = TSYSERR;
1369 return (-1);
1371 *didallocp = 1;
1373 return (0);
1377 * This routine checks if the receive buffer in the instance structure
1378 * is available (non-null). If it is, the buffer is acquired and marked busy
1379 * (null). If it is busy (possible in MT programs), it allocates a new
1380 * buffer and sets a flag indicating new memory was allocated and the caller
1381 * has to free it.
1382 * Note: The receive buffer pointer can also be null if the transport
1383 * provider does not support connect/disconnect data, (e.g. TCP) - not
1384 * just when it is "busy". In that case, ti_rcvsize will be 0 and that is
1385 * used to instantiate the databuf which points to a null buffer of
1386 * length 0 which is the right thing to do for that case.
1389 _t_acquire_databuf(
1390 struct _ti_user *tiptr,
1391 struct strbuf *databufp,
1392 int *didallocp)
1394 *didallocp = 0;
1396 databufp->len = 0;
1397 if (tiptr->ti_rcvbuf) {
1398 assert(tiptr->ti_rcvsize != 0);
1399 databufp->buf = tiptr->ti_rcvbuf;
1400 tiptr->ti_rcvbuf = NULL;
1401 databufp->maxlen = tiptr->ti_rcvsize;
1402 } else if (tiptr->ti_rcvsize == 0) {
1403 databufp->buf = NULL;
1404 databufp->maxlen = 0;
1405 } else {
1407 * tiptr->ti_rcvbuf is in use;
1408 * allocate a new buffer and free it after use.
1410 if ((databufp->maxlen = _t_rbuf_alloc(tiptr,
1411 &databufp->buf)) < 0) {
1412 t_errno = TSYSERR;
1413 return (-1);
1415 *didallocp = 1;
1417 return (0);
1421 * This routine requests timod to look for any expedited data
1422 * queued in the "receive buffers" in the kernel. Used for XTI
1423 * t_look() semantics for transports that send expedited
1424 * data inline (e.g. TCP).
1425 * Returns -1 for failure
1426 * Returns 0 for success
1427 * On a successful return, the location pointed by "expedited_queuedp"
1428 * contains
1429 * 0 if no expedited data is found queued in "receive buffers"
1430 * 1 if expedited data is found queued in "receive buffers"
1434 _t_expinline_queued(int fd, int *expedited_queuedp)
1436 union {
1437 struct ti_sync_req ti_req;
1438 struct ti_sync_ack ti_ack;
1439 char pad[128];
1440 } ioctl_data;
1441 void *ioctlbuf = &ioctl_data; /* for TI_SYNC with room to grow */
1442 /* preferred location first local variable */
1443 /* see note in _t_create above */
1444 struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
1445 struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
1446 int rval, retlen;
1448 *expedited_queuedp = 0;
1449 /* request info on rq expinds */
1450 tsrp->tsr_flags = TSRF_IS_EXP_IN_RCVBUF;
1451 do {
1452 rval = _t_do_ioctl(fd, ioctlbuf,
1453 (int)sizeof (struct T_info_req), TI_SYNC, &retlen);
1454 } while (rval < 0 && errno == EINTR);
1456 if (rval < 0)
1457 return (-1);
1460 * This is a "less than" check as "struct ti_sync_ack" returned by
1461 * TI_SYNC can grow in size in future kernels. If/when a statically
1462 * linked application is run on a future kernel, it should not fail.
1464 if (retlen < (int)sizeof (struct ti_sync_ack)) {
1465 t_errno = TSYSERR;
1466 errno = EIO;
1467 return (-1);
1469 if (tsap->tsa_flags & TSAF_EXP_QUEUED)
1470 *expedited_queuedp = 1;
1471 return (0);
1475 * Support functions for use by functions that do scatter/gather
1476 * like t_sndv(), t_rcvv(), etc. follow below.
1480 * _t_bytecount_upto_intmax() :
1481 * Sum of the lengths of the individual buffers in
1482 * the t_iovec array. If the sum exceeds INT_MAX
1483 * it is truncated to INT_MAX.
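* For example, two buffers of INT_MAX bytes each yield INT_MAX, not the
* overflowed sum.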
1485 unsigned int
1486 _t_bytecount_upto_intmax(const struct t_iovec *tiov, unsigned int tiovcount)
1488 size_t nbytes;
1489 int i;
1491 nbytes = 0;
1492 for (i = 0; i < tiovcount && nbytes < INT_MAX; i++) {
1493 if (tiov[i].iov_len >= INT_MAX) {
1494 nbytes = INT_MAX;
1495 break;
1497 nbytes += tiov[i].iov_len;
1500 if (nbytes > INT_MAX)
1501 nbytes = INT_MAX;
1503 return ((unsigned int)nbytes);
1507 * Gather the data in the t_iovec buffers, into a single linear buffer
1508 * starting at dataptr. Caller must have allocated sufficient space
1509 * starting at dataptr. The total amount of data that is gathered is
1510 * limited to INT_MAX. Any remaining data in the t_iovec buffers is
1511 * not copied.
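* For example, gathering two 2-byte buffers "ab" and "cd" produces the
* four bytes "abcd" starting at dataptr.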
1513 void
1514 _t_gather(char *dataptr, const struct t_iovec *tiov, unsigned int tiovcount)
1516 char *curptr;
1517 unsigned int cur_count;
1518 unsigned int nbytes_remaining;
1519 int i;
1521 curptr = dataptr;
1522 cur_count = 0;
1524 nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
1525 for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
1526 if (tiov[i].iov_len <= nbytes_remaining)
1527 cur_count = (int)tiov[i].iov_len;
1528 else
1529 cur_count = nbytes_remaining;
1530 (void) memcpy(curptr, tiov[i].iov_base, cur_count);
1531 curptr += cur_count;
1532 nbytes_remaining -= cur_count;
1537 * Scatter the data from the single linear buffer at pdatabuf->buf into
1538 * the t_iovec buffers.
1540 void
1541 _t_scatter(struct strbuf *pdatabuf, struct t_iovec *tiov, int tiovcount)
1543 char *curptr;
1544 unsigned int nbytes_remaining;
1545 unsigned int curlen;
1546 int i;
1549 * There cannot be any uncopied data leftover in pdatabuf
1550 * at the conclusion of this function. (asserted below)
1552 assert(pdatabuf->len <= _t_bytecount_upto_intmax(tiov, tiovcount));
1553 curptr = pdatabuf->buf;
1554 nbytes_remaining = pdatabuf->len;
1555 for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
1556 if (tiov[i].iov_len < nbytes_remaining)
1557 curlen = (unsigned int)tiov[i].iov_len;
1558 else
1559 curlen = nbytes_remaining;
1560 (void) memcpy(tiov[i].iov_base, curptr, curlen);
1561 curptr += curlen;
1562 nbytes_remaining -= curlen;
1567 * Adjust the iovec array for subsequent use. Examine each element in the
1568 * iovec array, and zero out the iov_len if the buffer was sent fully;
1569 * otherwise the buffer was only partially sent, so adjust both iov_len and
1570 * iov_base.
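* For example, with two 4-byte entries and bytes_sent == 6, the first
* entry ends up with iov_len 0 and the second with iov_base advanced by 2
* and iov_len reduced to 2.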
1573 void
1574 _t_adjust_iov(int bytes_sent, struct iovec *iov, int *iovcountp)
1577 int i;
1579 for (i = 0; i < *iovcountp && bytes_sent; i++) {
1580 if (iov[i].iov_len == 0)
1581 continue;
1582 if (bytes_sent < iov[i].iov_len)
1583 break;
1584 else {
1585 bytes_sent -= iov[i].iov_len;
1586 iov[i].iov_len = 0;
1589 iov[i].iov_len -= bytes_sent;
1590 iov[i].iov_base += bytes_sent;
1594 * Copy the t_iovec array to the iovec array while taking care to see
1595 * that the sum of the buffer lengths in the result is not more than
1596 * INT_MAX. This function requires that T_IOV_MAX is no larger than
1597 * IOV_MAX. Otherwise the resulting array is not a suitable input to
1598 * writev(). If the sum of the lengths in t_iovec is zero, so is the
1599 * resulting iovec.
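* For example, if the t_iovec lengths sum past INT_MAX, copying stops once
* INT_MAX bytes are covered and *iovcountp reports only the entries filled.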
1601 void
1602 _t_copy_tiov_to_iov(const struct t_iovec *tiov, int tiovcount,
1603 struct iovec *iov, int *iovcountp)
1605 int i;
1606 unsigned int nbytes_remaining;
1608 nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
1609 i = 0;
1610 do {
1611 iov[i].iov_base = tiov[i].iov_base;
1612 if (tiov[i].iov_len > nbytes_remaining)
1613 iov[i].iov_len = nbytes_remaining;
1614 else
1615 iov[i].iov_len = tiov[i].iov_len;
1616 nbytes_remaining -= iov[i].iov_len;
1617 i++;
1618 } while (nbytes_remaining != 0 && i < tiovcount);
1620 *iovcountp = i;
1624 * Routine called after connection establishment on transports where
1625 * connection establishment changes certain transport attributes such as
1626 * TIDU_size
1629 _t_do_postconn_sync(int fd, struct _ti_user *tiptr)
1631 union {
1632 struct T_capability_req tc_req;
1633 struct T_capability_ack tc_ack;
1634 } ioctl_data;
1636 void *ioctlbuf = &ioctl_data;
1637 int expected_acksize;
1638 int retlen, rval;
1639 struct T_capability_req *tc_reqp = (struct T_capability_req *)ioctlbuf;
1640 struct T_capability_ack *tc_ackp = (struct T_capability_ack *)ioctlbuf;
1641 struct T_info_ack *tiap;
1644 * This T_CAPABILITY_REQ should not fail, even if it is unsupported
1645 * by the transport provider. timod will emulate it in that case.
1647 tc_reqp->PRIM_type = T_CAPABILITY_REQ;
1648 tc_reqp->CAP_bits1 = TC1_INFO;
1649 rval = _t_do_ioctl(fd, (char *)ioctlbuf,
1650 (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
1651 expected_acksize = (int)sizeof (struct T_capability_ack);
1653 if (rval < 0)
1654 return (-1);
1657 * T_capability TPI messages are extensible and can grow in future.
1658 * However timod will take care of returning no more information
1659 * than what was requested, and truncating the "extended"
1660 * information towards the end of the T_capability_ack, if necessary.
1662 if (retlen != expected_acksize) {
1663 t_errno = TSYSERR;
1664 errno = EIO;
1665 return (-1);
1669 * The T_info_ack part of the T_capability_ack is guaranteed to be
1670 * present only if the corresponding TC1_INFO bit is set
1672 if ((tc_ackp->CAP_bits1 & TC1_INFO) == 0) {
1673 t_errno = TSYSERR;
1674 errno = EPROTO;
1675 return (-1);
1678 tiap = &tc_ackp->INFO_ack;
1679 if (tiap->PRIM_type != T_INFO_ACK) {
1680 t_errno = TSYSERR;
1681 errno = EPROTO;
1682 return (-1);
1686 * Note: Sync with the latest information returned in "struct T_info_ack",
1687 * but we deliberately do not sync the state here as user level state
1688 * construction here is not required, only update of attributes which
1689 * may have changed because of negotiations during connection
1690 * establishment.
1692 assert(tiap->TIDU_size > 0);
1693 tiptr->ti_maxpsz = tiap->TIDU_size;
1694 assert(tiap->TSDU_size >= T_INVALID);
1695 tiptr->ti_tsdusize = tiap->TSDU_size;
1696 assert(tiap->ETSDU_size >= T_INVALID);
1697 tiptr->ti_etsdusize = tiap->ETSDU_size;
1698 assert(tiap->CDATA_size >= T_INVALID);
1699 tiptr->ti_cdatasize = tiap->CDATA_size;
1700 assert(tiap->DDATA_size >= T_INVALID);
1701 tiptr->ti_ddatasize = tiap->DDATA_size;
1702 tiptr->ti_prov_flag = tiap->PROVIDER_flag;
1704 return (0);