/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, Joyent Inc. All rights reserved.
 * Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013,2014 by Delphix. All rights reserved.
 * Copyright 2014, OmniTI Computer Consulting, Inc. All rights reserved.
 */
/* Copyright (c) 1990 Mentat Inc. */
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/sunddi.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/ethernet.h>
#include <sys/cpuvar.h>
#include <sys/pattr.h>
#include <sys/policy.h>
#include <sys/sunldi.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/isa_defs.h>
#include <sys/random.h>
#include <sys/systm.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <net/route.h>
#include <inet/ipsec_impl.h>
#include <inet/common.h>
#include <inet/ip_impl.h>
#include <inet/ip_ndp.h>
#include <inet/proto_set.h>
#include <inet/mib2.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/tcp_impl.h>
#include <inet/udp_impl.h>
#include <net/pfkeyv2.h>
#include <inet/ipdrop.h>
#include <inet/ipclassifier.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_if.h>
#include <inet/ipp_common.h>
#include <inet/ip_rts.h>
#include <inet/ip_netinfo.h>
#include <sys/squeue_impl.h>
#include <sys/squeue.h>
#include <rpc/pmap_prot.h>
#include <sys/callo.h>
/*
 * TCP Notes: aka FireEngine Phase I (PSARC 2002/433)
 *
 * (Read the detailed design doc in PSARC case directory)
 *
 * The entire tcp state is contained in tcp_t and conn_t structure
 * which are allocated in tandem using ipcl_conn_create() and passing
 * IPCL_TCPCONN as a flag. We use 'conn_ref' and 'conn_lock' to protect
 * the references on the tcp_t. The tcp_t structure is never compressed
 * and packets always land on the correct TCP perimeter from the time
 * the eager is created till the time the tcp_t dies (as such the old mentat
 * TCP global queue is not used for detached state and no IPSEC checking
 * is required). The global queue is still allocated to send out resets
 * for connections which have no listeners, and IP directly calls
 * tcp_xmit_listeners_reset() which does any policy check.
 *
 * Protection and Synchronisation mechanism:
 *
 * The tcp data structure does not use any kind of lock for protecting
 * its state but instead uses 'squeues' for mutual exclusion from various
 * read and write side threads. To access a tcp member, the thread should
 * always be behind squeue (via squeue_enter with flags as SQ_FILL, SQ_PROCESS,
 * or SQ_NODRAIN). Since the squeues allow a direct function call, the caller
 * can pass any tcp function having a prototype of edesc_t as argument
 * (different from the traditional STREAMs model where packets come in only
 * at designated entry points). The list of functions that can be directly
 * called via squeue is listed before the usual function prototypes.
 *
 * TCP is MT-Hot and we use a reference based scheme to make sure that the
 * tcp structure doesn't disappear while it's needed. When the application
 * creates an outgoing connection or accepts an incoming connection, we
 * start out with 2 references on 'conn_ref'. One for TCP and one for IP.
 * The IP reference is just a symbolic reference since ip_tcpclose()
 * looks at the tcp structure after tcp_close_output() returns, which could
 * have dropped the last TCP reference. So as long as the connection is
 * in attached state i.e. !TCP_IS_DETACHED, we have 2 references on the
 * conn_t. The classifier puts its own reference when the connection is
 * inserted in the listen or connected hash. Anytime a thread needs to enter
 * the tcp connection perimeter, it retrieves the conn/tcp from q->ptr
 * on the write side or by doing a classify on the read side, and then puts a
 * reference on the conn before doing squeue_enter/tryenter/fill. On the
 * read side, the classifier itself puts the reference under the fanout lock
 * to make sure that the tcp can't disappear before it gets processed. The
 * squeue will drop this reference automatically so the called function
 * doesn't have to do a DEC_REF.
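 *
 * Illustrative sketch of the entry pattern just described (an editorial
 * assumption drawn from this comment, with hypothetical 'ira' and 'tag'
 * arguments; not verbatim code from this file):
 *
 *	CONN_INC_REF(connp);
 *	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_input_data, connp,
 *	    ira, tcp_squeue_flag, tag);
 *
 * The squeue drops the reference once the callback has run, so the called
 * function does not need to DEC_REF.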
 *
 * Opening a new connection:
 *
 * The outgoing connection open is pretty simple. tcp_open() does the
 * work in creating the conn/tcp structure and initializing it. The
 * squeue assignment is done based on the CPU the application
 * is running on. So for outbound connections, processing is always done
 * on the application CPU, which might be different from the incoming CPU
 * being interrupted by the NIC. An optimal way would be to figure out
 * the NIC <-> CPU binding at listen time, and assign the outgoing
 * connection to the squeue attached to the CPU that will be interrupted
 * for incoming packets (we know the NIC based on the bind IP address).
 * This might seem like a problem if more data is going out, but the
 * fact is that in most cases the transmit is ACK driven transmit where
 * the outgoing data normally sits on TCP's xmit queue waiting to be
 * transmitted.
 *
 * Accepting a connection:
 *
 * This is a more interesting case because of various races involved in
 * establishing an eager in its own perimeter. Read the meta comment on
 * top of tcp_input_listener(). But briefly, the squeue is picked by
 * ip_fanout based on the ring or the sender (if loopback).
 *
 * Closing a connection:
 *
 * The close is fairly straightforward. tcp_close() calls tcp_close_output()
 * via squeue to do the close and mark the tcp as detached if the connection
 * was in state TCPS_ESTABLISHED or greater. In the latter case, TCP keeps its
 * reference but tcp_close() always drops IP's reference. So if the tcp was
 * not killed, it is sitting in the time_wait list with 2 references - 1 for
 * TCP and 1 because it is in the classifier's connected hash. This is the
 * condition we use to determine that it is OK to clean up the tcp outside of
 * squeue when time wait expires (check the ref under fanout and conn_lock and
 * if it is 2, remove it from fanout hash and kill it).
 *
 * Although close just drops the necessary references and marks the
 * tcp_detached state, tcp_close needs to know that tcp_detached has been
 * set (under squeue) before letting the STREAM go away (because an
 * inbound packet might attempt to go up the STREAM while the close
 * has happened and tcp_detached is not set). So a special lock and
 * flag is used along with a condition variable (tcp_closelock, tcp_closed,
 * and tcp_closecv) to signal tcp_close that tcp_close_output() has marked
 * tcp_detached.
 *
 * Special provisions and fast paths:
 *
 * We make special provisions for sockfs by marking tcp_issocket
 * whenever we have only sockfs on top of TCP. This allows us to skip
 * putting the tcp in the acceptor hash, since a sockfs listener can never
 * become an acceptor, and also to avoid allocating a tcp_t for the acceptor
 * STREAM since the eager has already been allocated and the accept now
 * happens on the acceptor STREAM. There is a big blob of comment on top of
 * tcp_input_listener explaining the new accept. When the socket is POP'd,
 * sockfs sends us an ioctl to mark the fact and we go back to the old
 * behaviour. Once tcp_issocket is unset, it is never set again for the
 * life of that connection.
 *
 * IPsec notes :
 *
 * Since a packet is always executed on the correct TCP perimeter
 * all IPsec processing is deferred to IP, including checking new
 * connections and setting IPsec policies for new connections. The
 * only exception is tcp_xmit_listeners_reset(), which is called
 * directly from IP and needs to do a policy check to see if TH_RST
 * can be sent out.
 */
/*
 * Values for squeue switch:
 * 1: SQ_NODRAIN
 * 2: SQ_PROCESS
 * 3: SQ_FILL
 */
int tcp_squeue_wput = 2;	/* /etc/systems */
int tcp_squeue_flag;

/*
 * To prevent memory hog, limit the number of entries in tcp_free_list
 * to 1% of available memory / number of cpus
 */
uint_t tcp_free_list_max_cnt = 0;

#define	TIDUSZ	4096	/* transport interface data unit size */
/*
 * Size of acceptor hash list. It has to be a power of 2 for hashing.
 */
#define	TCP_ACCEPTOR_FANOUT_SIZE	512

#ifdef	_ILP32
#define	TCP_ACCEPTOR_HASH(accid)					\
		(((uint_t)(accid) >> 8) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
#else
#define	TCP_ACCEPTOR_HASH(accid)					\
		((uint_t)(accid) & (TCP_ACCEPTOR_FANOUT_SIZE - 1))
#endif	/* _ILP32 */
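/*
 * Editorial note: with TCP_ACCEPTOR_FANOUT_SIZE of 512 the mask keeps the
 * low 9 bits of the acceptor id. On ILP32 the id is pointer-derived, so
 * its lowest byte carries little entropy and is shifted away first; e.g.
 * an id of 0x12345 hashes to ((0x12345 >> 8) & 511) == 0x123.
 */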
/*
 * Minimum number of connections which can be created per listener. Used
 * when the listener connection count is in effect.
 */
static uint32_t tcp_min_conn_listener = 2;

uint32_t tcp_early_abort = 30;
/* TCP Timer control structure */
typedef struct tcpt_s {
	pfv_t	tcpt_pfv;	/* The routine we are to call */
	tcp_t	*tcpt_tcp;	/* The parameter we are to pass in */
} tcpt_t;
/*
 * Functions called directly via squeue having a prototype of edesc_t.
 */
void tcp_input_listener(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *ira);
void tcp_input_data(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *ira);
static void tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *dummy);

/* Prototype for TCP functions */
static int	tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp,
		    in_port_t dstport, uint_t srcid);
static int	tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp,
		    in_port_t dstport, uint32_t flowinfo,
		    uint_t srcid, uint32_t scope_id);
static void	tcp_iss_init(tcp_t *tcp);
static void	tcp_reinit(tcp_t *tcp);
static void	tcp_reinit_values(tcp_t *tcp);

static void	tcp_wsrv(queue_t *q);
static void	tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa);
static void	tcp_update_zcopy(tcp_t *tcp);
static void	tcp_notify(void *, ip_xmit_attr_t *, ixa_notify_type_t,
		    ixa_notify_arg_t);
static void	*tcp_stack_init(netstackid_t stackid, netstack_t *ns);
static void	tcp_stack_fini(netstackid_t stackid, void *arg);

static int	tcp_squeue_switch(int);

static int	tcp_open(queue_t *, dev_t *, int, int, cred_t *, boolean_t);
static int	tcp_openv4(queue_t *, dev_t *, int, int, cred_t *);
static int	tcp_openv6(queue_t *, dev_t *, int, int, cred_t *);

static void	tcp_squeue_add(squeue_t *);
struct module_info tcp_rinfo = {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, TCP_RECV_HIWATER, TCP_RECV_LOWATER
};

static struct module_info tcp_winfo = {
	TCP_MOD_ID, TCP_MOD_NAME, 0, INFPSZ, 127, 16
};

/*
 * Entry points for TCP as a device. The normal case which supports
 * the TCP functionality.
 * We have separate open functions for the /dev/tcp and /dev/tcp6 devices.
 */
struct qinit tcp_rinitv4 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv4, tcp_tpi_close, NULL, &tcp_rinfo
};

struct qinit tcp_rinitv6 = {
	NULL, (pfi_t)tcp_rsrv, tcp_openv6, tcp_tpi_close, NULL, &tcp_rinfo
};

struct qinit tcp_winit = {
	(pfi_t)tcp_wput, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* Initial entry point for TCP in socket mode. */
struct qinit tcp_sock_winit = {
	(pfi_t)tcp_wput_sock, (pfi_t)tcp_wsrv, NULL, NULL, NULL, &tcp_winfo
};

/* TCP entry point during fallback */
struct qinit tcp_fallback_sock_winit = {
	(pfi_t)tcp_wput_fallback, NULL, NULL, NULL, NULL, &tcp_winfo
};

/*
 * Entry points for TCP as an acceptor STREAM opened by sockfs when doing
 * an accept. Avoid allocating data structures since the eager has already
 * been created.
 */
struct qinit tcp_acceptor_rinit = {
	NULL, (pfi_t)tcp_rsrv, NULL, tcp_tpi_close_accept, NULL, &tcp_winfo
};

struct qinit tcp_acceptor_winit = {
	(pfi_t)tcp_tpi_accept, NULL, NULL, NULL, NULL, &tcp_winfo
};

/* For AF_INET aka /dev/tcp */
struct streamtab tcpinfov4 = {
	&tcp_rinitv4, &tcp_winit
};

/* For AF_INET6 aka /dev/tcp6 */
struct streamtab tcpinfov6 = {
	&tcp_rinitv6, &tcp_winit
};
/*
 * Following assumes TPI alignment requirements stay along 32 bit
 * boundaries.
 */
#define	ROUNDUP32(x) \
	(((x) + (sizeof (int32_t) - 1)) & ~(sizeof (int32_t) - 1))
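/*
 * Editorial example: ROUNDUP32(5) is (5 + 3) & ~3 == 8, and ROUNDUP32(8)
 * stays 8, i.e. a size is padded up to the next multiple of
 * sizeof (int32_t).
 */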
/* Template for response to info request. */
struct T_info_ack tcp_g_t_info_ack = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin_t),		/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

struct T_info_ack tcp_g_t_info_ack_v6 = {
	T_INFO_ACK,		/* PRIM_type */
	0,			/* TSDU_size */
	T_INFINITE,		/* ETSDU_size */
	T_INVALID,		/* CDATA_size */
	T_INVALID,		/* DDATA_size */
	sizeof (sin6_t),	/* ADDR_size */
	0,			/* OPT_size - not initialized here */
	TIDUSZ,			/* TIDU_size */
	T_COTS_ORD,		/* SERV_type */
	TCPS_IDLE,		/* CURRENT_state */
	(XPG4_1|EXPINLINE)	/* PROVIDER_flag */
};

/*
 * TCP tunables related declarations. Definitions are in tcp_tunables.c
 */
extern mod_prop_info_t tcp_propinfo_tbl[];
extern int tcp_propinfo_count;
#define	IS_VMLOANED_MBLK(mp) \
	(((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0)

uint32_t do_tcpzcopy = 1;	/* 0: disable, 1: enable, 2: force */

/*
 * Forces all connections to obey the value of the tcps_maxpsz_multiplier
 * tunable settable via NDD. Otherwise, the per-connection behavior is
 * determined dynamically during tcp_set_destination(), which is the default.
 */
boolean_t tcp_static_maxpsz = B_FALSE;
/*
 * If the receive buffer size is changed, this function is called to update
 * the upper socket layer on the new delayed receive wake up threshold.
 */
void
tcp_set_recv_threshold(tcp_t *tcp, uint32_t new_rcvthresh)
{
	uint32_t default_threshold = SOCKET_RECVHIWATER >> 3;

	if (IPCL_IS_NONSTR(tcp->tcp_connp)) {
		conn_t *connp = tcp->tcp_connp;
		struct sock_proto_props sopp;

		/*
		 * only increase rcvthresh up to default_threshold
		 */
		if (new_rcvthresh > default_threshold)
			new_rcvthresh = default_threshold;

		sopp.sopp_flags = SOCKOPT_RCVTHRESH;
		sopp.sopp_rcvthresh = new_rcvthresh;

		(*connp->conn_upcalls->su_set_proto_props)
		    (connp->conn_upper_handle, &sopp);
	}
}
/*
 * Figure out the value of the window scale option. Note that the rwnd is
 * ASSUMED to be rounded up to the nearest MSS before the calculation.
 * We cannot find the scale value and then do a round up of tcp_rwnd
 * because the scale value may not be correct after that.
 *
 * Set the compiler flag to make this function inline.
 */
void
tcp_set_ws_value(tcp_t *tcp)
{
	int i;
	uint32_t rwnd = tcp->tcp_rwnd;

	for (i = 0; rwnd > TCP_MAXWIN && i < TCP_MAX_WINSHIFT;
	    i++, rwnd >>= 1)
		;
	tcp->tcp_rcv_ws = i;
}
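/*
 * Editorial example: for a tcp_rwnd of 1 MB (1048576) and TCP_MAXWIN of
 * 65535, the loop above settles on a shift of 5, since 1048576 >> 5 ==
 * 32768 is the first shifted value that fits in the 16-bit window field.
 */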
/*
 * Remove cached/latched IPsec references.
 */
void
tcp_ipsec_cleanup(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;

	ASSERT(connp->conn_flags & IPCL_TCPCONN);

	if (connp->conn_latch != NULL) {
		IPLATCH_REFRELE(connp->conn_latch);
		connp->conn_latch = NULL;
	}
	if (connp->conn_latch_in_policy != NULL) {
		IPPOL_REFRELE(connp->conn_latch_in_policy);
		connp->conn_latch_in_policy = NULL;
	}
	if (connp->conn_latch_in_action != NULL) {
		IPACT_REFRELE(connp->conn_latch_in_action);
		connp->conn_latch_in_action = NULL;
	}
	if (connp->conn_policy != NULL) {
		IPPH_REFRELE(connp->conn_policy, connp->conn_netstack);
		connp->conn_policy = NULL;
	}
}
/*
 * Cleanup before placing on free list.
 * Disassociate from the netstack/tcp_stack_t since the freelist
 * is per squeue and not per netstack.
 */
void
tcp_cleanup(tcp_t *tcp)
{
	mblk_t		*mp;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	netstack_t	*ns = tcps->tcps_netstack;
	mblk_t		*tcp_rsrv_mp;

	tcp_bind_hash_remove(tcp);

	/* Cleanup that which needs the netstack first */
	tcp_ipsec_cleanup(tcp);
	ixa_cleanup(connp->conn_ixa);

	if (connp->conn_ht_iphc != NULL) {
		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
		connp->conn_ht_iphc = NULL;
		connp->conn_ht_iphc_allocated = 0;
		connp->conn_ht_iphc_len = 0;
		connp->conn_ht_ulp = NULL;
		connp->conn_ht_ulp_len = 0;
		tcp->tcp_ipha = NULL;
		tcp->tcp_ip6h = NULL;
		tcp->tcp_tcpha = NULL;
	}

	/* We clear any IP_OPTIONS and extension headers */
	ip_pkt_free(&connp->conn_xmit_ipp);

	tcp_free(tcp);

	/*
	 * Since we will bzero the entire structure, we need to
	 * remove it and reinsert it in global hash list. We
	 * know the walkers can't get to this conn because we
	 * had set CONDEMNED flag earlier and checked reference
	 * under conn_lock so walker won't pick it and when we
	 * go the ipcl_globalhash_remove() below, no walker
	 * can get to it.
	 */
	ipcl_globalhash_remove(connp);

	/* Save some state */
	mp = tcp->tcp_timercache;

	tcp_rsrv_mp = tcp->tcp_rsrv_mp;

	if (connp->conn_cred != NULL) {
		crfree(connp->conn_cred);
		connp->conn_cred = NULL;
	}
	ipcl_conn_cleanup(connp);
	connp->conn_flags = IPCL_TCPCONN;

	/*
	 * Now it is safe to decrement the reference counts.
	 * This might be the last reference on the netstack
	 * in which case it will cause the freeing of the IP Instance.
	 */
	connp->conn_netstack = NULL;
	connp->conn_ixa->ixa_ipst = NULL;
	netstack_rele(ns);
	ASSERT(tcps != NULL);
	tcp->tcp_tcps = NULL;

	bzero(tcp, sizeof (tcp_t));

	/* restore the state */
	tcp->tcp_timercache = mp;

	tcp->tcp_rsrv_mp = tcp_rsrv_mp;

	tcp->tcp_connp = connp;

	ASSERT(connp->conn_tcp == tcp);
	ASSERT(connp->conn_flags & IPCL_TCPCONN);
	connp->conn_state_flags = CONN_INCIPIENT;
	ASSERT(connp->conn_proto == IPPROTO_TCP);
	ASSERT(connp->conn_ref == 1);
}
/*
 * Adapt to the information, such as rtt and rtt_sd, provided from the
 * DCE and IRE maintained by IP.
 *
 * Checks for multicast and broadcast destination address.
 * Returns zero if ok; an errno on failure.
 *
 * Note that the MSS calculation here is based on the info given in
 * the DCE and IRE. We do not do any calculation based on TCP options. They
 * will be handled in tcp_input_data() when TCP knows which options to use.
 *
 * Note on how TCP gets its parameters for a connection.
 *
 * When a tcp_t structure is allocated, it gets all the default parameters.
 * In tcp_set_destination(), it gets those metric parameters, like rtt, rtt_sd,
 * spipe, rpipe, ... from the route metrics. Route metric overrides the
 * default.
 *
 * An incoming SYN with a multicast or broadcast destination address is dropped
 * in ip_fanout_v4/v6.
 *
 * An incoming SYN with a multicast or broadcast source address is always
 * dropped in tcp_set_destination, since IPDF_ALLOW_MCBC is not set in
 * conn_connect.
 * The same logic in tcp_set_destination also serves to
 * reject an attempt to connect to a broadcast or multicast (destination)
 * address.
 */
int
tcp_set_destination(tcp_t *tcp)
{
	uint32_t	mss_max;
	uint32_t	mss;
	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	iulp_t		uinfo;
	int		error;
	uint32_t	flags;

	flags = IPDF_LSO | IPDF_ZCOPY;
	/*
	 * Make sure we have a dce for the destination to avoid dce_ident
	 * contention for connected sockets.
	 */
	flags |= IPDF_UNIQUE_DCE;

	if (!tcps->tcps_ignore_path_mtu)
		connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY;

	/* Use conn_lock to satisfy ASSERT; tcp is already serialized */
	mutex_enter(&connp->conn_lock);
	error = conn_connect(connp, &uinfo, flags);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		return (error);

	error = tcp_build_hdrs(tcp);
	if (error != 0)
		return (error);

	tcp->tcp_localnet = uinfo.iulp_localnet;

	if (uinfo.iulp_rtt != 0) {
		clock_t	rto;

		tcp->tcp_rtt_sa = uinfo.iulp_rtt;
		tcp->tcp_rtt_sd = uinfo.iulp_rtt_sd;
		rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
		    tcps->tcps_rexmit_interval_extra +
		    (tcp->tcp_rtt_sa >> 5);

		TCP_SET_RTO(tcp, rto);
	}
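	/*
	 * Editorial example: with a route-cached iulp_rtt of 800 ms and
	 * iulp_rtt_sd of 100 ms (ignoring tcps_rexmit_interval_extra), the
	 * formula above gives (800 >> 3) + 100 + (800 >> 5) = 225 ms.
	 */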
	if (uinfo.iulp_ssthresh != 0)
		tcp->tcp_cwnd_ssthresh = uinfo.iulp_ssthresh;
	else
		tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;
	if (uinfo.iulp_spipe > 0) {
		connp->conn_sndbuf = MIN(uinfo.iulp_spipe,
		    tcps->tcps_max_buf);
		if (tcps->tcps_snd_lowat_fraction != 0) {
			connp->conn_sndlowat = connp->conn_sndbuf /
			    tcps->tcps_snd_lowat_fraction;
		}
		(void) tcp_maxpsz_set(tcp, B_TRUE);
	}
	/*
	 * Note that up till now, the acceptor always inherits the receive
	 * window from the listener. But if there is a metric
	 * associated with a host, we should use that instead of
	 * inheriting it from the listener. Thus we need to pass this
	 * info back to the caller.
	 */
	if (uinfo.iulp_rpipe > 0) {
		tcp->tcp_rwnd = MIN(uinfo.iulp_rpipe,
		    tcps->tcps_max_buf);
	}

	if (uinfo.iulp_rtomax > 0) {
		tcp->tcp_second_timer_threshold =
		    uinfo.iulp_rtomax;
	}

	/*
	 * Use the metric option settings, iulp_tstamp_ok and
	 * iulp_wscale_ok, only for active open. What this means
	 * is that if the other side uses timestamp or window
	 * scale option, TCP will also use those options. That
	 * is for passive open. If the application sets a
	 * large window, window scale is enabled regardless of
	 * the value in iulp_wscale_ok. This is the behavior
	 * since 2.6. So we keep it.
	 * The only case left in passive open processing is the
	 * check for SACK.
	 * For ECN, it should probably be like SACK. But the
	 * current value is binary, so we treat it like the other
	 * cases. The metric only controls active open. For passive
	 * open, the ndd param, tcp_ecn_permitted, controls the
	 * behavior.
	 */
	if (!tcp_detached) {
		/*
		 * The if check means that the following can only
		 * be turned on by the metrics only IRE, but not off.
		 */
		if (uinfo.iulp_tstamp_ok)
			tcp->tcp_snd_ts_ok = B_TRUE;
		if (uinfo.iulp_wscale_ok)
			tcp->tcp_snd_ws_ok = B_TRUE;
		if (uinfo.iulp_sack == 2)
			tcp->tcp_snd_sack_ok = B_TRUE;
		if (uinfo.iulp_ecn_ok)
			tcp->tcp_ecn_ok = B_TRUE;
	} else {
		/*
		 * As above, the if check means that SACK can only be
		 * turned on by the metric only IRE.
		 */
		if (uinfo.iulp_sack > 0)
			tcp->tcp_snd_sack_ok = B_TRUE;
	}

	/*
	 * XXX Note that currently, iulp_mtu can be as small as 68
	 * because of PMTUd. So tcp_mss may go negative if the combined
	 * length of all those options exceeds 28 bytes. But because
	 * of the tcp_mss_min check below, we may not have a problem if
	 * tcp_mss_min is of a reasonable value. The default is 1 so
	 * the negative problem still exists. And the check defeats PMTUd.
	 * In fact, if PMTUd finds that the MSS should be smaller than
	 * tcp_mss_min, TCP should turn off PMTUd and use the tcp_mss_min
	 * value.
	 *
	 * We do not deal with that now. All those problems related to
	 * PMTUd will be fixed later.
	 */
	ASSERT(uinfo.iulp_mtu != 0);
	mss = tcp->tcp_initial_pmtu = uinfo.iulp_mtu;

	/* Sanity check for MSS value. */
	if (connp->conn_ipversion == IPV4_VERSION)
		mss_max = tcps->tcps_mss_max_ipv4;
	else
		mss_max = tcps->tcps_mss_max_ipv6;

	if (tcp->tcp_ipsec_overhead == 0)
		tcp->tcp_ipsec_overhead = conn_ipsec_length(connp);

	mss -= tcp->tcp_ipsec_overhead;

	if (mss < tcps->tcps_mss_min)
		mss = tcps->tcps_mss_min;
	if (mss > mss_max)
		mss = mss_max;

	/* Note that this is the maximum MSS, excluding all options. */
	tcp->tcp_mss = mss;

	/*
	 * Update the tcp connection with LSO capability.
	 */
	tcp_update_lso(tcp, connp->conn_ixa);

	/*
	 * Initialize the ISS here now that we have the full connection ID.
	 * The RFC 1948 method of initial sequence number generation requires
	 * knowledge of the full connection ID before setting the ISS.
	 */
	tcp_iss_init(tcp);

	tcp->tcp_loopback = (uinfo.iulp_loopback | uinfo.iulp_local);

	/*
	 * Make sure that conn is not marked incipient
	 * for incoming connections. A blind
	 * removal of the incipient flag is cheaper than
	 * check and removal.
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);
	return (0);
}
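/*
 * Editorial example for the MSS clamp above: an IPv4 connection with an
 * iulp_mtu of 1500 and a tcp_ipsec_overhead of 60 starts from mss = 1500,
 * drops to 1440 after the overhead subtraction, and is then clamped into
 * [tcps_mss_min, tcps_mss_max_ipv4] before being stored in tcp_mss.
 */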
/*
 * tcp_clean_death / tcp_close_detached must not be called more than once
 * on a tcp. Thus every function that potentially calls tcp_clean_death
 * must check for the tcp state before calling tcp_clean_death.
 * Eg. tcp_input_data, tcp_eager_kill, tcp_clean_death_wrapper,
 * tcp_timer_handler, all check for the tcp state.
 */
/* ARGSUSED */
void
tcp_clean_death_wrapper(void *arg, mblk_t *mp, void *arg2,
    ip_recv_attr_t *dummy)
{
	tcp_t *tcp = ((conn_t *)arg)->conn_tcp;

	freemsg(mp);
	if (tcp->tcp_state > TCPS_BOUND)
		(void) tcp_clean_death(((conn_t *)arg)->conn_tcp, ETIMEDOUT);
}
/*
 * We are dying for some reason. Try to do it gracefully. (May be called
 * as writer.)
 *
 * Return -1 if the structure was not cleaned up (if the cleanup had to be
 * done by a service procedure).
 * TBD - Should the return value distinguish between the tcp_t being
 * freed and it being reinitialized?
 */
int
tcp_clean_death(tcp_t *tcp, int err)
{
	mblk_t	*mp;
	queue_t	*q;
	conn_t	*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (tcp->tcp_linger_tid != 0 &&
	    TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
		tcp_stop_lingering(tcp);
	}

	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	if (TCP_IS_DETACHED(tcp)) {
		if (tcp->tcp_hard_binding) {
			/*
			 * It's an eager that we are dealing with. We close the
			 * eager but in case a conn_ind has already gone to the
			 * listener, let tcp_accept_finish() send a discon_ind
			 * to the listener and drop the last reference. If the
			 * listener doesn't even know about the eager i.e. the
			 * conn_ind hasn't gone up, blow away the eager and drop
			 * the last reference as well. If the conn_ind has gone
			 * up, state should be BOUND. tcp_accept_finish
			 * will figure out that the connection has received a
			 * RST and will send a DISCON_IND to the application.
			 */
			tcp_closei_local(tcp);
			if (!tcp->tcp_tconnind_started) {
				CONN_DEC_REF(connp);
			} else {
				tcp->tcp_state = TCPS_BOUND;
				DTRACE_TCP6(state__change, void, NULL,
				    ip_xmit_attr_t *, connp->conn_ixa,
				    void, NULL, tcp_t *, tcp, void, NULL,
				    int32_t, TCPS_CLOSED);
			}
		} else {
			tcp_close_detached(tcp);
		}
		return (0);
	}

	TCP_STAT(tcps, tcp_clean_death_nondetached);

	/*
	 * The connection is dead. Decrement listener connection counter if
	 * necessary.
	 */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);

	/*
	 * When a connection is moved to TIME_WAIT state, the connection
	 * counter is already decremented. So no need to decrement here
	 * again. See SET_TIME_WAIT() macro.
	 */
	if (tcp->tcp_state >= TCPS_ESTABLISHED &&
	    tcp->tcp_state < TCPS_TIME_WAIT) {
		TCPS_CONN_DEC(tcps);
	}

	q = connp->conn_rq;

	/* Trash all inbound data */
	if (!IPCL_IS_NONSTR(connp)) {
		ASSERT(q != NULL);
		flushq(q, FLUSHALL);
	}

	/*
	 * If we are at least part way open and there is error
	 * (err==0 implies no error)
	 * notify our client by a T_DISCON_IND.
	 */
	if ((tcp->tcp_state >= TCPS_SYN_SENT) && err) {
		if (tcp->tcp_state >= TCPS_ESTABLISHED &&
		    !TCP_IS_SOCKET(tcp)) {
			/*
			 * Send M_FLUSH according to TPI. Because sockets will
			 * (and must) ignore FLUSHR we do that only for TPI
			 * endpoints and sockets in STREAMS mode.
			 */
			(void) putnextctl1(q, M_FLUSH, FLUSHR);
		}
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_TRACE|SL_ERROR,
			    "tcp_clean_death: discon err %d", err);
		}
		if (IPCL_IS_NONSTR(connp)) {
			/* Direct socket, use upcall */
			(*connp->conn_upcalls->su_disconnected)(
			    connp->conn_upper_handle, tcp->tcp_connid, err);
		} else {
			mp = mi_tpi_discon_ind(NULL, err, 0);
			if (mp != NULL) {
				putnext(q, mp);
			} else {
				if (connp->conn_debug) {
					(void) strlog(TCP_MOD_ID, 0, 1,
					    SL_ERROR|SL_TRACE,
					    "tcp_clean_death, sending M_ERROR");
				}
				(void) putnextctl1(q, M_ERROR, EPROTO);
			}
		}
		if (tcp->tcp_state <= TCPS_SYN_RCVD) {
			/* SYN_SENT or SYN_RCVD */
			TCPS_BUMP_MIB(tcps, tcpAttemptFails);
		} else if (tcp->tcp_state <= TCPS_CLOSE_WAIT) {
			/* ESTABLISHED or CLOSE_WAIT */
			TCPS_BUMP_MIB(tcps, tcpEstabResets);
		}
	}

	/*
	 * ESTABLISHED non-STREAMS eagers are not 'detached' because
	 * an upper handle is obtained when the SYN-ACK comes in. So it
	 * should receive the 'disconnected' upcall, but tcp_reinit should
	 * not be called since this is an eager.
	 */
	if (tcp->tcp_listener != NULL && IPCL_IS_NONSTR(connp)) {
		tcp_closei_local(tcp);
		tcp->tcp_state = TCPS_BOUND;
		DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
		    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
		    int32_t, TCPS_CLOSED);
		return (0);
	}

	tcp_reinit(tcp);
	if (IPCL_IS_NONSTR(connp))
		(void) tcp_do_unbind(connp);

	return (-1);
}
/*
 * In case tcp is in the "lingering state" and waits for the SO_LINGER timeout
 * to expire, stop the wait and finish the close.
 */
void
tcp_stop_lingering(tcp_t *tcp)
{
	clock_t	delta = 0;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	tcp->tcp_linger_tid = 0;
	if (tcp->tcp_state > TCPS_LISTEN) {
		tcp_acceptor_hash_remove(tcp);
		mutex_enter(&tcp->tcp_non_sq_lock);
		if (tcp->tcp_flow_stopped) {
			tcp_clrqfull(tcp);
		}
		mutex_exit(&tcp->tcp_non_sq_lock);

		if (tcp->tcp_timer_tid != 0) {
			delta = TCP_TIMER_CANCEL(tcp, tcp->tcp_timer_tid);
			tcp->tcp_timer_tid = 0;
		}
		/*
		 * Need to cancel those timers which will not be used when
		 * TCP is detached. This has to be done before the conn_wq
		 * is cleared.
		 */
		tcp_timers_stop(tcp);

		tcp->tcp_detached = B_TRUE;
		connp->conn_rq = NULL;
		connp->conn_wq = NULL;

		if (tcp->tcp_state == TCPS_TIME_WAIT) {
			tcp_time_wait_append(tcp);
			TCP_DBGSTAT(tcps, tcp_detach_time_wait);
			goto finish;
		}

		/*
		 * If delta is zero the timer event wasn't executed and was
		 * successfully canceled. In this case we need to restart it
		 * with the minimal delta possible.
		 */
		if (delta >= 0) {
			tcp->tcp_timer_tid = TCP_TIMER(tcp, tcp_timer,
			    delta ? delta : 1);
		}
	} else {
		tcp_closei_local(tcp);
		CONN_DEC_REF(connp);
	}
finish:
	tcp->tcp_detached = B_TRUE;
	connp->conn_rq = NULL;
	connp->conn_wq = NULL;

	/* Signal closing thread that it can complete close */
	mutex_enter(&tcp->tcp_closelock);
	tcp->tcp_closed = 1;
	cv_signal(&tcp->tcp_closecv);
	mutex_exit(&tcp->tcp_closelock);

	/* If we have an upper handle (socket), release it */
	if (IPCL_IS_NONSTR(connp)) {
		ASSERT(connp->conn_upper_handle != NULL);
		(*connp->conn_upcalls->su_closed)(connp->conn_upper_handle);
		connp->conn_upper_handle = NULL;
		connp->conn_upcalls = NULL;
	}
}
void
tcp_close_common(conn_t *connp, int flags)
{
	tcp_t		*tcp = connp->conn_tcp;
	mblk_t		*mp = &tcp->tcp_closemp;
	boolean_t	conn_ioctl_cleanup_reqd = B_FALSE;
	mblk_t		*bp;

	ASSERT(connp->conn_ref >= 2);

	/*
	 * Mark the conn as closing. ipsq_pending_mp_add will not
	 * add any mp to the pending mp list, after this conn has
	 * started closing.
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags |= CONN_CLOSING;
	if (connp->conn_oper_pending_ill != NULL)
		conn_ioctl_cleanup_reqd = B_TRUE;
	CONN_INC_REF_LOCKED(connp);
	mutex_exit(&connp->conn_lock);
	tcp->tcp_closeflags = (uint8_t)flags;
	ASSERT(connp->conn_ref >= 3);

	/*
	 * tcp_closemp_used is used below without any protection of a lock
	 * as we don't expect any one else to use it concurrently at this
	 * point otherwise it would be a major defect.
	 */

	if (mp->b_prev == NULL)
		tcp->tcp_closemp_used = B_TRUE;
	else
		cmn_err(CE_PANIC, "tcp_close: concurrent use of tcp_closemp: "
		    "connp %p tcp %p\n", (void *)connp, (void *)tcp);

	TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);

	/*
	 * Cleanup any queued ioctls here. This must be done before the wq/rq
	 * are re-written by tcp_close_output().
	 */
	if (conn_ioctl_cleanup_reqd)
		conn_ioctl_cleanup(connp);

	/*
	 * As CONN_CLOSING is set, no further ioctls should be passed down to
	 * IP for this conn (see the guards in tcp_ioctl, tcp_wput_ioctl and
	 * tcp_wput_iocdata). If the ioctl was queued on an ipsq,
	 * conn_ioctl_cleanup should have found it and removed it. If the ioctl
	 * was still in flight at the time, we wait for it here. See comments
	 * for CONN_INC_IOCTLREF in ip.h for details.
	 */
	mutex_enter(&connp->conn_lock);
	while (connp->conn_ioctlref > 0)
		cv_wait(&connp->conn_cv, &connp->conn_lock);
	ASSERT(connp->conn_ioctlref == 0);
	ASSERT(connp->conn_oper_pending_ill == NULL);
	mutex_exit(&connp->conn_lock);

	SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_close_output, connp,
	    NULL, tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);

	/*
	 * For non-STREAMS sockets, the normal case is that the conn makes
	 * an upcall when it's finally closed, so there is no need to wait
	 * in the protocol. But in case of SO_LINGER the thread sleeps here
	 * so it can properly deal with the thread being interrupted.
	 */
	if (IPCL_IS_NONSTR(connp) && connp->conn_linger == 0)
		goto nowait;

	mutex_enter(&tcp->tcp_closelock);
	while (!tcp->tcp_closed) {
		if (!cv_wait_sig(&tcp->tcp_closecv, &tcp->tcp_closelock)) {
			/*
			 * The cv_wait_sig() was interrupted. We now do the
			 * following:
			 *
			 * 1) If the endpoint was lingering, we allow this
			 * to be interrupted by cancelling the linger timeout
			 * and closing normally.
			 *
			 * 2) Revert to calling cv_wait()
			 *
			 * We revert to using cv_wait() to avoid an
			 * infinite loop which can occur if the calling
			 * thread is higher priority than the squeue worker
			 * thread and is bound to the same cpu.
			 */
			if (connp->conn_linger && connp->conn_lingertime > 0) {
				mutex_exit(&tcp->tcp_closelock);
				/* Entering squeue, bump ref count. */
				CONN_INC_REF(connp);
				bp = allocb_wait(0, BPRI_HI, STR_NOSIG, NULL);
				SQUEUE_ENTER_ONE(connp->conn_sqp, bp,
				    tcp_linger_interrupted, connp, NULL,
				    tcp_squeue_flag, SQTAG_IP_TCP_CLOSE);
				mutex_enter(&tcp->tcp_closelock);
			}
			break;
		}
	}
	while (!tcp->tcp_closed)
		cv_wait(&tcp->tcp_closecv, &tcp->tcp_closelock);
	mutex_exit(&tcp->tcp_closelock);

	/*
	 * In the case of listener streams that have eagers in the q or q0
	 * we wait for the eagers to drop their reference to us. conn_rq and
	 * conn_wq of the eagers point to our queues. By waiting for the
	 * refcnt to drop to 1, we are sure that the eagers have cleaned
	 * up their queue pointers and also dropped their references to us.
	 *
	 * For non-STREAMS sockets we do not have to wait here; the
	 * listener will instead make a su_closed upcall when the last
	 * reference is dropped.
	 */
	if (tcp->tcp_wait_for_eagers && !IPCL_IS_NONSTR(connp)) {
		mutex_enter(&connp->conn_lock);
		while (connp->conn_ref != 1) {
			cv_wait(&connp->conn_cv, &connp->conn_lock);
		}
		mutex_exit(&connp->conn_lock);
	}

nowait:
	connp->conn_cpid = NOPID;
}
/*
 * Called by tcp_close() routine via squeue when lingering is
 * interrupted by a signal.
 */
/* ARGSUSED */
static void
tcp_linger_interrupted(void *arg, mblk_t *mp, void *arg2, ip_recv_attr_t *dummy)
{
	conn_t	*connp = (conn_t *)arg;
	tcp_t	*tcp = connp->conn_tcp;

	freeb(mp);
	if (tcp->tcp_linger_tid != 0 &&
	    TCP_TIMER_CANCEL(tcp, tcp->tcp_linger_tid) >= 0) {
		tcp_stop_lingering(tcp);
		tcp->tcp_client_errno = EINTR;
	}
}
/*
 * Clean up the b_next and b_prev fields of every mblk pointed at by *mpp.
 * Some stream heads get upset if they see these later on as anything but NULL.
 */
void
tcp_close_mpp(mblk_t **mpp)
{
	mblk_t	*mp;

	if ((mp = *mpp) != NULL) {
		do {
			mp->b_next = NULL;
			mp->b_prev = NULL;
		} while ((mp = mp->b_cont) != NULL);

		mp = *mpp;
		*mpp = NULL;
		freemsg(mp);
	}
}
/* Do detached close. */
void
tcp_close_detached(tcp_t *tcp)
{
	if (tcp->tcp_fused)
		tcp_unfuse(tcp);

	tcp_closei_local(tcp);
	CONN_DEC_REF(tcp->tcp_connp);
}
/*
 * The tcp_t is going away. Remove it from all lists and set it
 * to TCPS_CLOSED. The freeing up of memory is deferred until
 * tcp_inactive. This is needed since a thread in tcp_rput might have
 * done a CONN_INC_REF on this structure before it was removed from the
 * hashes.
 */
void
tcp_closei_local(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int32_t		oldstate;

	if (!TCP_IS_SOCKET(tcp))
		tcp_acceptor_hash_remove(tcp);

	TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
	tcp->tcp_ibsegs = 0;
	TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
	tcp->tcp_obsegs = 0;

	/*
	 * This can be called via tcp_time_wait_processing() if TCP gets a
	 * SYN with sequence number outside the TIME-WAIT connection's
	 * window. So we need to check for TIME-WAIT state here as the
	 * connection counter is already decremented. See SET_TIME_WAIT()
	 * macro.
	 */
	if (tcp->tcp_state >= TCPS_ESTABLISHED &&
	    tcp->tcp_state < TCPS_TIME_WAIT) {
		TCPS_CONN_DEC(tcps);
	}

	/*
	 * If we are an eager connection hanging off a listener that
	 * hasn't formally accepted the connection yet, get off its
	 * list and blow off any data that we have accumulated.
	 */
	if (tcp->tcp_listener != NULL) {
		tcp_t	*listener = tcp->tcp_listener;
		mutex_enter(&listener->tcp_eager_lock);
		/*
		 * tcp_tconnind_started == B_TRUE means that the
		 * conn_ind has already gone to listener. At
		 * this point, eager will be closed but we
		 * leave it in listeners eager list so that
		 * if listener decides to close without doing
		 * accept, we can clean this up. In tcp_tli_accept
		 * we take care of the case of accept on closed
		 * eager.
		 */
		if (!tcp->tcp_tconnind_started) {
			tcp_eager_unlink(tcp);
			mutex_exit(&listener->tcp_eager_lock);
			/*
			 * We don't want to have any pointers to the
			 * listener queue, after we have released our
			 * reference on the listener
			 */
			ASSERT(tcp->tcp_detached);
			connp->conn_rq = NULL;
			connp->conn_wq = NULL;
			CONN_DEC_REF(listener->tcp_connp);
		} else {
			mutex_exit(&listener->tcp_eager_lock);
		}
	}

	/* Stop all the timers */
	tcp_timers_stop(tcp);

	if (tcp->tcp_state == TCPS_LISTEN) {
		if (tcp->tcp_ip_addr_cache) {
			kmem_free((void *)tcp->tcp_ip_addr_cache,
			    IP_ADDR_CACHE_SIZE * sizeof (ipaddr_t));
			tcp->tcp_ip_addr_cache = NULL;
		}
	}

	/* Decrement listener connection counter if necessary. */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);

	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped)
		tcp_clrqfull(tcp);
	mutex_exit(&tcp->tcp_non_sq_lock);

	tcp_bind_hash_remove(tcp);
	/*
	 * If the tcp_time_wait_collector (which runs outside the squeue)
	 * is trying to remove this tcp from the time wait list, we will
	 * block in tcp_time_wait_remove while trying to acquire the
	 * tcp_time_wait_lock. The logic in tcp_time_wait_collector also
	 * requires the ipcl_hash_remove to be ordered after the
	 * tcp_time_wait_remove for the refcnt checks to work correctly.
	 */
	if (tcp->tcp_state == TCPS_TIME_WAIT)
		(void) tcp_time_wait_remove(tcp, NULL);
	ipcl_hash_remove(connp);
	oldstate = tcp->tcp_state;
	tcp->tcp_state = TCPS_CLOSED;
	/* Need to probe before ixa_cleanup() is called */
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);
	ixa_cleanup(connp->conn_ixa);

	/*
	 * Mark the conn as CONDEMNED
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags |= CONN_CONDEMNED;
	mutex_exit(&connp->conn_lock);

	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);

	tcp_ipsec_cleanup(tcp);
}
/*
 * tcp is dying (called from ipcl_conn_destroy and error cases).
 * Free the tcp_t in either case.
 */
void
tcp_free(tcp_t *tcp)
{
	mblk_t	*mp;
	conn_t	*connp = tcp->tcp_connp;

	ASSERT(tcp != NULL);
	ASSERT(tcp->tcp_ptpahn == NULL && tcp->tcp_acceptor_hash == NULL);

	connp->conn_rq = NULL;
	connp->conn_wq = NULL;

	tcp_close_mpp(&tcp->tcp_xmit_head);
	tcp_close_mpp(&tcp->tcp_reass_head);
	if (tcp->tcp_rcv_list != NULL) {
		/* Free b_next chain */
		tcp_close_mpp(&tcp->tcp_rcv_list);
	}
	if ((mp = tcp->tcp_urp_mp) != NULL) {
		freemsg(mp);
	}
	if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
		freemsg(mp);
	}

	if (tcp->tcp_fused_sigurg_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}

	if (tcp->tcp_ordrel_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_ordrel_mp);
		tcp->tcp_ordrel_mp = NULL;
	}

	TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
	bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));

	if (tcp->tcp_hopopts != NULL) {
		mi_free(tcp->tcp_hopopts);
		tcp->tcp_hopopts = NULL;
		tcp->tcp_hopoptslen = 0;
	}
	ASSERT(tcp->tcp_hopoptslen == 0);
	if (tcp->tcp_dstopts != NULL) {
		mi_free(tcp->tcp_dstopts);
		tcp->tcp_dstopts = NULL;
		tcp->tcp_dstoptslen = 0;
	}
	ASSERT(tcp->tcp_dstoptslen == 0);
	if (tcp->tcp_rthdrdstopts != NULL) {
		mi_free(tcp->tcp_rthdrdstopts);
		tcp->tcp_rthdrdstopts = NULL;
		tcp->tcp_rthdrdstoptslen = 0;
	}
	ASSERT(tcp->tcp_rthdrdstoptslen == 0);
	if (tcp->tcp_rthdr != NULL) {
		mi_free(tcp->tcp_rthdr);
		tcp->tcp_rthdr = NULL;
		tcp->tcp_rthdrlen = 0;
	}
	ASSERT(tcp->tcp_rthdrlen == 0);

	/*
	 * Following is really blowing away a union.
	 * It happens to have exactly two members of identical size
	 * so the following code is enough.
	 */
	tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);

	/*
	 * If this is a non-STREAM socket still holding on to an upper
	 * handle, release it. As a result of fallback we might also see
	 * STREAMS based conns with upper handles, in which case there is
	 * nothing to do other than clearing the field.
	 */
	if (connp->conn_upper_handle != NULL) {
		if (IPCL_IS_NONSTR(connp)) {
			(*connp->conn_upcalls->su_closed)(
			    connp->conn_upper_handle);
			tcp->tcp_detached = B_TRUE;
		}
		connp->conn_upper_handle = NULL;
		connp->conn_upcalls = NULL;
	}
}
/*
 * tcp_get_conn/tcp_free_conn
 *
 * tcp_get_conn is used to get a clean tcp connection structure.
 * It tries to reuse the connections put on the freelist by the
 * time_wait_collector, failing which it goes to kmem_cache. This
 * way has two benefits compared to just allocating from and
 * freeing to kmem_cache.
 * 1) The time_wait_collector can free (which includes the cleanup)
 * outside the squeue. So when the interrupt comes, we have a clean
 * connection sitting in the freelist. Obviously, this buys us
 * speed.
 * 2) Defence against DOS attack. Allocating a tcp/conn in tcp_input_listener
 * has multiple disadvantages - tying up the squeue during alloc.
 * But allocating the conn/tcp in IP land is also not the best since
 * we can't check the 'q' and 'q0' which are protected by squeue and
 * blindly allocate memory which might have to be freed here if we are
 * not allowed to accept the connection. By using the freelist and
 * putting the conn/tcp back in freelist, we don't pay a penalty for
 * allocating memory without checking 'q/q0' and freeing it if we can't
 * accept the connection.
 *
 * Care should be taken to put the conn back in the same squeue's freelist
 * from which it was allocated. Best results are obtained if conn is
 * allocated from the listener's squeue and freed to the same. The time wait
 * collector will free up the freelist if the connection ends up sitting
 * there for too long.
 */
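/*
 * Editorial sketch of the intended usage (assumed from the comment above,
 * not verbatim code from this file): the listener's squeue is passed as
 * 'arg' so the conn comes from, and later returns to, that squeue's
 * freelist:
 *
 *	connp = (conn_t *)tcp_get_conn(sqp, tcps);
 *	if (connp == NULL)
 *		... drop the incoming SYN ...
 */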
void *
tcp_get_conn(void *arg, tcp_stack_t *tcps)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp = NULL;
	squeue_t	*sqp = (squeue_t *)arg;
	tcp_squeue_priv_t *tcp_time_wait;
	netstack_t	*ns;
	mblk_t		*tcp_rsrv_mp = NULL;

	tcp_time_wait =
	    *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));

	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
	tcp = tcp_time_wait->tcp_free_list;
	ASSERT((tcp != NULL) ^ (tcp_time_wait->tcp_free_list_cnt == 0));
	if (tcp != NULL) {
		tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
		tcp_time_wait->tcp_free_list_cnt--;
		mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
		tcp->tcp_time_wait_next = NULL;
		connp = tcp->tcp_connp;
		connp->conn_flags |= IPCL_REUSED;

		ASSERT(tcp->tcp_tcps == NULL);
		ASSERT(connp->conn_netstack == NULL);
		ASSERT(tcp->tcp_rsrv_mp != NULL);
		ns = tcps->tcps_netstack;
		netstack_hold(ns);
		connp->conn_netstack = ns;
		connp->conn_ixa->ixa_ipst = ns->netstack_ip;
		tcp->tcp_tcps = tcps;
		ipcl_globalhash_insert(connp);

		connp->conn_ixa->ixa_notify_cookie = tcp;
		ASSERT(connp->conn_ixa->ixa_notify == tcp_notify);
		connp->conn_recv = tcp_input_data;
		ASSERT(connp->conn_recvicmp == tcp_icmp_input);
		ASSERT(connp->conn_verifyicmp == tcp_verifyicmp);
		return ((void *)connp);
	}
	mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
	/*
	 * Pre-allocate the tcp_rsrv_mp. This mblk will not be freed until
	 * this conn_t/tcp_t is freed at ipcl_conn_destroy().
	 */
	tcp_rsrv_mp = allocb(0, BPRI_HI);
	if (tcp_rsrv_mp == NULL)
		return (NULL);

	if ((connp = ipcl_conn_create(IPCL_TCPCONN, KM_NOSLEEP,
	    tcps->tcps_netstack)) == NULL) {
		freeb(tcp_rsrv_mp);
		return (NULL);
	}

	tcp = connp->conn_tcp;
	tcp->tcp_rsrv_mp = tcp_rsrv_mp;
	mutex_init(&tcp->tcp_rsrv_mp_lock, NULL, MUTEX_DEFAULT, NULL);

	tcp->tcp_tcps = tcps;

	connp->conn_recv = tcp_input_data;
	connp->conn_recvicmp = tcp_icmp_input;
	connp->conn_verifyicmp = tcp_verifyicmp;

	/*
	 * Register tcp_notify to listen to capability changes detected by IP.
	 * This upcall is made in the context of the call to conn_ip_output
	 * thus it is inside the squeue.
	 */
	connp->conn_ixa->ixa_notify = tcp_notify;
	connp->conn_ixa->ixa_notify_cookie = tcp;

	return ((void *)connp);
}
/*
 * Handle connect to IPv4 destinations, including connections for AF_INET6
 * sockets connecting to IPv4 mapped IPv6 destinations.
 * Returns zero if OK, a positive errno, or a negative TLI error.
 */
static int
tcp_connect_ipv4(tcp_t *tcp, ipaddr_t *dstaddrp, in_port_t dstport,
    uint_t srcid)
{
	ipaddr_t	dstaddr = *dstaddrp;
	uint16_t	lport;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int		error;

	ASSERT(connp->conn_ipversion == IPV4_VERSION);

	/* Check for attempt to connect to INADDR_ANY */
	if (dstaddr == INADDR_ANY) {
		/*
		 * SunOS 4.x and 4.3 BSD allow an application
		 * to connect a TCP socket to INADDR_ANY.
		 * When they do this, the kernel picks the
		 * address of one interface and uses it
		 * instead. The kernel usually ends up
		 * picking the address of the loopback
		 * interface. This is an undocumented feature.
		 * However, we provide the same thing here
		 * in order to have source and binary
		 * compatibility with SunOS 4.x.
		 * Update the T_CONN_REQ (sin/sin6) since it is used to
		 * generate the T_CONN_CON.
		 */
		dstaddr = htonl(INADDR_LOOPBACK);
		*dstaddrp = dstaddr;
	}

	/* Handle __sin6_src_id if socket not bound to an IP address */
	if (srcid != 0 && connp->conn_laddr_v4 == INADDR_ANY) {
		if (!ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
		    IPCL_ZONEID(connp), B_TRUE, tcps->tcps_netstack)) {
			/* Mismatch - conn_laddr_v6 would be v6 address. */
			return (EADDRNOTAVAIL);
		}
		connp->conn_saddr_v6 = connp->conn_laddr_v6;
	}

	IN6_IPADDR_TO_V4MAPPED(dstaddr, &connp->conn_faddr_v6);
	connp->conn_fport = dstport;

	/*
	 * At this point the remote destination address and remote port fields
	 * in the tcp-four-tuple have been filled in the tcp structure. Now we
	 * have to see which state tcp was in so we can take appropriate action.
	 */
	if (tcp->tcp_state == TCPS_IDLE) {
		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to SYN_SENT
		 * tcp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state.
		 */
		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
		    tcp, B_TRUE);
		lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
		    B_FALSE, B_FALSE);
		if (lport == 0)
			return (-TNOADDR);
	}

	/*
	 * Lookup the route to determine a source address and the uinfo.
	 * Setup TCP parameters based on the metrics/DCE.
	 */
	error = tcp_set_destination(tcp);
	if (error != 0)
		return (error);

	/*
	 * Don't let an endpoint connect to itself.
	 */
	if (connp->conn_faddr_v4 == connp->conn_laddr_v4 &&
	    connp->conn_fport == connp->conn_lport)
		return (-TBADADDR);

	tcp->tcp_state = TCPS_SYN_SENT;

	return (ipcl_conn_insert_v4(connp));
}
/*
 * Handle connect to IPv6 destinations.
 * Returns zero if OK, a positive errno, or a negative TLI error.
 */
static int
tcp_connect_ipv6(tcp_t *tcp, in6_addr_t *dstaddrp, in_port_t dstport,
    uint32_t flowinfo, uint_t srcid, uint32_t scope_id)
{
	uint16_t	lport;
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int		error;

	ASSERT(connp->conn_family == AF_INET6);

	/*
	 * If we're here, it means that the destination address is a native
	 * IPv6 address. Return an error if conn_ipversion is not IPv6. A
	 * reason why it might not be IPv6 is if the socket was bound to an
	 * IPv4-mapped IPv6 address.
	 */
	if (connp->conn_ipversion != IPV6_VERSION)
		return (-TBADADDR);

	/*
	 * Interpret a zero destination to mean loopback.
	 * Update the T_CONN_REQ (sin/sin6) since it is used to
	 * generate the T_CONN_CON.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(dstaddrp))
		*dstaddrp = ipv6_loopback;

	/* Handle __sin6_src_id if socket not bound to an IP address */
	if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6)) {
		if (!ip_srcid_find_id(srcid, &connp->conn_laddr_v6,
		    IPCL_ZONEID(connp), B_FALSE, tcps->tcps_netstack)) {
			/* Mismatch - conn_laddr_v6 would be v4-mapped. */
			return (EADDRNOTAVAIL);
		}
		connp->conn_saddr_v6 = connp->conn_laddr_v6;
	}

	/*
	 * Take care of the scope_id now.
	 */
	if (scope_id != 0 && IN6_IS_ADDR_LINKSCOPE(dstaddrp)) {
		connp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET;
		connp->conn_ixa->ixa_scopeid = scope_id;
	} else {
		connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
	}

	connp->conn_flowinfo = flowinfo;
	connp->conn_faddr_v6 = *dstaddrp;
	connp->conn_fport = dstport;

	/*
	 * At this point the remote destination address and remote port fields
	 * in the tcp-four-tuple have been filled in the tcp structure. Now we
	 * have to see which state tcp was in so we can take appropriate action.
	 */
	if (tcp->tcp_state == TCPS_IDLE) {
		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to SYN_SENT
		 * tcp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state.
		 */
		lport = tcp_update_next_port(tcps->tcps_next_port_to_try,
		    tcp, B_TRUE);
		lport = tcp_bindi(tcp, lport, &connp->conn_laddr_v6, 0, B_TRUE,
		    B_FALSE, B_FALSE);
		if (lport == 0)
			return (-TNOADDR);
	}

	/*
	 * Lookup the route to determine a source address and the uinfo.
	 * Setup TCP parameters based on the metrics/DCE.
	 */
	error = tcp_set_destination(tcp);
	if (error != 0)
		return (error);

	/*
	 * Don't let an endpoint connect to itself.
	 */
	if (IN6_ARE_ADDR_EQUAL(&connp->conn_faddr_v6, &connp->conn_laddr_v6) &&
	    connp->conn_fport == connp->conn_lport)
		return (-TBADADDR);

	tcp->tcp_state = TCPS_SYN_SENT;

	return (ipcl_conn_insert_v6(connp));
}
/*
 * Disconnect
 * Note that unlike other functions this returns a positive tli error
 * when it fails; it never returns an errno.
 */
static int
tcp_disconnect_common(tcp_t *tcp, t_scalar_t seqnum)
{
	conn_t		*lconnp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	/*
	 * Right now, upper modules pass down a T_DISCON_REQ to TCP,
	 * when the stream is in BOUND state. Do not send a reset,
	 * since the destination IP address is not valid, and it can
	 * be the initialized value of all zeros (broadcast address).
	 */
	if (tcp->tcp_state <= TCPS_BOUND) {
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_disconnect: bad state, %d", tcp->tcp_state);
		}
		return (TOUTSTATE);
	} else if (tcp->tcp_state >= TCPS_ESTABLISHED) {
		TCPS_CONN_DEC(tcps);
	}

	if (seqnum == -1 || tcp->tcp_conn_req_max == 0) {
		/*
		 * According to TPI, for non-listeners, ignore seqnum
		 * in disconnect.
		 * Following interpretation of -1 seqnum is historical
		 * and implied TPI ? (TPI only states that for T_CONN_IND,
		 * a valid seqnum should not be -1).
		 *
		 * -1 means disconnect everything
		 * regardless even on a listener.
		 */

		int old_state = tcp->tcp_state;
		ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

		/*
		 * The connection can't be on the tcp_time_wait_head list
		 * since it is not detached.
		 */
		ASSERT(tcp->tcp_time_wait_next == NULL);
		ASSERT(tcp->tcp_time_wait_prev == NULL);
		ASSERT(tcp->tcp_time_wait_expire == 0);

		/*
		 * If it used to be a listener, check to make sure no one else
		 * has taken the port before switching back to LISTEN state.
		 */
		if (connp->conn_ipversion == IPV4_VERSION) {
			lconnp = ipcl_lookup_listener_v4(connp->conn_lport,
			    connp->conn_laddr_v4, IPCL_ZONEID(connp), ipst);
		} else {
			uint_t ifindex = 0;

			if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)
				ifindex = connp->conn_ixa->ixa_scopeid;

			/* Allow conn_bound_if listeners? */
			lconnp = ipcl_lookup_listener_v6(connp->conn_lport,
			    &connp->conn_laddr_v6, ifindex, IPCL_ZONEID(connp),
			    ipst);
		}
		if (tcp->tcp_conn_req_max && lconnp == NULL) {
			tcp->tcp_state = TCPS_LISTEN;
			DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
			    connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
			    NULL, int32_t, old_state);
		} else if (old_state > TCPS_BOUND) {
			tcp->tcp_conn_req_max = 0;
			tcp->tcp_state = TCPS_BOUND;
			DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
			    connp->conn_ixa, void, NULL, tcp_t *, tcp, void,
			    NULL, int32_t, old_state);

			/*
			 * If this end point is not going to become a listener,
			 * decrement the listener connection count if
			 * necessary. Note that we do not do this if it is
			 * going to be a listener (the above if case) since
			 * then it may remove the counter struct.
			 */
			if (tcp->tcp_listen_cnt != NULL)
				TCP_DECR_LISTEN_CNT(tcp);
		}
		if (lconnp != NULL)
			CONN_DEC_REF(lconnp);
		switch (old_state) {
		case TCPS_SYN_SENT:
		case TCPS_SYN_RCVD:
			TCPS_BUMP_MIB(tcps, tcpAttemptFails);
			break;
		case TCPS_ESTABLISHED:
		case TCPS_CLOSE_WAIT:
			TCPS_BUMP_MIB(tcps, tcpEstabResets);
			break;
		}

		mutex_enter(&tcp->tcp_eager_lock);
		if ((tcp->tcp_conn_req_cnt_q0 != 0) ||
		    (tcp->tcp_conn_req_cnt_q != 0)) {
			tcp_eager_cleanup(tcp, 0);
		}
		mutex_exit(&tcp->tcp_eager_lock);

		tcp_xmit_ctl("tcp_disconnect", tcp, tcp->tcp_snxt,
		    tcp->tcp_rnxt, TH_RST | TH_ACK);

		tcp_reinit(tcp);

		return (0);
	} else if (!tcp_eager_blowoff(tcp, seqnum)) {
		return (TBADSEQ);
	}
	return (0);
}
/*
 * Our client hereby directs us to reject the connection request
 * that tcp_input_listener() marked with 'seqnum'. Rejection consists
 * of sending the appropriate RST, not an ICMP error.
 */
static void
tcp_disconnect(tcp_t *tcp, mblk_t *mp)
{
	t_scalar_t seqnum;
	int	error;
	conn_t	*connp = tcp->tcp_connp;

	ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <= (uintptr_t)INT_MAX);
	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_discon_req)) {
		tcp_err_ack(tcp, mp, TPROTO, 0);
		return;
	}
	seqnum = ((struct T_discon_req *)mp->b_rptr)->SEQ_number;
	error = tcp_disconnect_common(tcp, seqnum);
	if (error != 0) {
		tcp_err_ack(tcp, mp, error, 0);
	} else {
		if (tcp->tcp_state >= TCPS_ESTABLISHED) {
			/* Send M_FLUSH according to TPI */
			(void) putnextctl1(connp->conn_rq, M_FLUSH, FLUSHRW);
		}
		mp = mi_tpi_ok_ack_alloc(mp);
		if (mp != NULL)
			putnext(connp->conn_rq, mp);
	}
}
/*
 * Handle reinitialization of a tcp structure.
 * Maintain "binding state" resetting the state to BOUND, LISTEN, or IDLE.
 */
static void
tcp_reinit(tcp_t *tcp)
{
	mblk_t		*mp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	int32_t		oldstate;

	/* tcp_reinit should never be called for detached tcp_t's */
	ASSERT(tcp->tcp_listener == NULL);
	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	/* Cancel outstanding timers */
	tcp_timers_stop(tcp);

	/*
	 * Reset everything in the state vector, after updating global
	 * MIB data from instance counters.
	 */
	TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
	tcp->tcp_ibsegs = 0;
	TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
	tcp->tcp_obsegs = 0;

	tcp_close_mpp(&tcp->tcp_xmit_head);
	if (tcp->tcp_snd_zcopy_aware)
		tcp_zcopy_notify(tcp);
	tcp->tcp_xmit_last = tcp->tcp_xmit_tail = NULL;
	tcp->tcp_unsent = tcp->tcp_xmit_tail_unsent = 0;
	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped &&
	    TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat) {
		tcp_clrqfull(tcp);
	}
	mutex_exit(&tcp->tcp_non_sq_lock);
	tcp_close_mpp(&tcp->tcp_reass_head);
	tcp->tcp_reass_tail = NULL;
	if (tcp->tcp_rcv_list != NULL) {
		/* Free b_next chain */
		tcp_close_mpp(&tcp->tcp_rcv_list);
		tcp->tcp_rcv_last_head = NULL;
		tcp->tcp_rcv_last_tail = NULL;
		tcp->tcp_rcv_cnt = 0;
	}
	tcp->tcp_rcv_last_tail = NULL;

	if ((mp = tcp->tcp_urp_mp) != NULL) {
		freemsg(mp);
		tcp->tcp_urp_mp = NULL;
	}
	if ((mp = tcp->tcp_urp_mark_mp) != NULL) {
		freemsg(mp);
		tcp->tcp_urp_mark_mp = NULL;
	}
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (tcp->tcp_ordrel_mp != NULL) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
		freeb(tcp->tcp_ordrel_mp);
		tcp->tcp_ordrel_mp = NULL;
	}

	/*
	 * Following is a union with two members which are
	 * identical types and size so the following cleanup
	 * is enough.
	 */
	tcp_close_mpp(&tcp->tcp_conn.tcp_eager_conn_ind);

	/*
	 * The connection can't be on the tcp_time_wait_head list
	 * since it is not detached.
	 */
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);

	/*
	 * Reset/preserve other values
	 */
	tcp_reinit_values(tcp);
	ipcl_hash_remove(connp);
	/* Note that ixa_cred gets cleared in ixa_cleanup */
	ixa_cleanup(connp->conn_ixa);
	tcp_ipsec_cleanup(tcp);

	connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
	connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
	oldstate = tcp->tcp_state;

	if (tcp->tcp_conn_req_max != 0) {
		/*
		 * This is the case when a TLI program uses the same
		 * transport end point to accept a connection.  This
		 * makes the TCP both a listener and acceptor.  When
		 * this connection is closed, we need to set the state
		 * back to TCPS_LISTEN.  Make sure that the eager list
		 * is reinitialized.
		 *
		 * Note that this stream is still bound to the four
		 * tuples of the previous connection in IP.  If a new
		 * SYN with different foreign address comes in, IP will
		 * not find it and will send it to the global queue.  In
		 * the global queue, TCP will do a tcp_lookup_listener()
		 * to find this stream.  This works because this stream
		 * is only removed from connected hash.
		 */
		tcp->tcp_state = TCPS_LISTEN;
		tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
		tcp->tcp_eager_next_drop_q0 = tcp;
		tcp->tcp_eager_prev_drop_q0 = tcp;
		/*
		 * Initially set conn_recv to tcp_input_listener_unbound to try
		 * to pick a good squeue for the listener when the first SYN
		 * arrives.  tcp_input_listener_unbound sets it to
		 * tcp_input_listener on that first SYN.
		 */
		connp->conn_recv = tcp_input_listener_unbound;

		connp->conn_proto = IPPROTO_TCP;
		connp->conn_faddr_v6 = ipv6_all_zeros;
		connp->conn_fport = 0;

		(void) ipcl_bind_insert(connp);
	} else {
		tcp->tcp_state = TCPS_BOUND;
	}

	/*
	 * Initialize to default values
	 */
	tcp_init_values(tcp, NULL);

	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);

	ASSERT(tcp->tcp_ptpbhn != NULL);
	tcp->tcp_rwnd = connp->conn_rcvbuf;
	tcp->tcp_mss = connp->conn_ipversion != IPV4_VERSION ?
	    tcps->tcps_mss_def_ipv6 : tcps->tcps_mss_def_ipv4;
}
/*
 * Force values to zero that need be zero.
 * Do not touch values associated with the BOUND or LISTEN state
 * since the connection will end up in that state after the reinit.
 * NOTE: tcp_reinit_values MUST have a line for each field in the tcp_t
 * structure!
 */
static void
tcp_reinit_values(tcp)
	tcp_t *tcp;
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

#ifndef	lint
#define	DONTCARE(x)
#define	PRESERVE(x)
#else
#define	DONTCARE(x)	((x) = (x))
#define	PRESERVE(x)	((x) = (x))
#endif	/* lint */
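
	/*
	 * A note on the macros above: in a normal (non-lint) build both
	 * DONTCARE() and PRESERVE() expand to nothing; under lint they become
	 * self-assignments so that every field is seen as referenced.  They
	 * are per-field annotations: PRESERVE() marks state that must survive
	 * the reinit, while DONTCARE() marks state that is re-initialized
	 * elsewhere before its next use.
	 */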
	PRESERVE(tcp->tcp_bind_hash_port);
	PRESERVE(tcp->tcp_bind_hash);
	PRESERVE(tcp->tcp_ptpbhn);
	PRESERVE(tcp->tcp_acceptor_hash);
	PRESERVE(tcp->tcp_ptpahn);

	/* Should be ASSERT NULL on these with new code! */
	ASSERT(tcp->tcp_time_wait_next == NULL);
	ASSERT(tcp->tcp_time_wait_prev == NULL);
	ASSERT(tcp->tcp_time_wait_expire == 0);
	PRESERVE(tcp->tcp_state);
	PRESERVE(connp->conn_rq);
	PRESERVE(connp->conn_wq);

	ASSERT(tcp->tcp_xmit_head == NULL);
	ASSERT(tcp->tcp_xmit_last == NULL);
	ASSERT(tcp->tcp_unsent == 0);
	ASSERT(tcp->tcp_xmit_tail == NULL);
	ASSERT(tcp->tcp_xmit_tail_unsent == 0);

	tcp->tcp_snxt = 0;			/* Displayed in mib */
	tcp->tcp_suna = 0;			/* Displayed in mib */
	tcp->tcp_swnd = 0;
	DONTCARE(tcp->tcp_cwnd);	/* Init in tcp_process_options */

	ASSERT(tcp->tcp_ibsegs == 0);
	ASSERT(tcp->tcp_obsegs == 0);

	if (connp->conn_ht_iphc != NULL) {
		kmem_free(connp->conn_ht_iphc, connp->conn_ht_iphc_allocated);
		connp->conn_ht_iphc = NULL;
		connp->conn_ht_iphc_allocated = 0;
		connp->conn_ht_iphc_len = 0;
		connp->conn_ht_ulp = NULL;
		connp->conn_ht_ulp_len = 0;
		tcp->tcp_ipha = NULL;
		tcp->tcp_ip6h = NULL;
		tcp->tcp_tcpha = NULL;
	}

	/* We clear any IP_OPTIONS and extension headers */
	ip_pkt_free(&connp->conn_xmit_ipp);

	DONTCARE(tcp->tcp_naglim);		/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_ipha);
	DONTCARE(tcp->tcp_ip6h);
	DONTCARE(tcp->tcp_tcpha);
	tcp->tcp_valid_bits = 0;

	DONTCARE(tcp->tcp_timer_backoff);	/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_last_recv_time);	/* Init in tcp_init_values */
	tcp->tcp_last_rcv_lbolt = 0;

	tcp->tcp_init_cwnd = 0;

	tcp->tcp_urp_last_valid = 0;
	tcp->tcp_hard_binding = 0;

	tcp->tcp_fin_acked = 0;
	tcp->tcp_fin_rcvd = 0;
	tcp->tcp_fin_sent = 0;
	tcp->tcp_ordrel_done = 0;

	tcp->tcp_detached = 0;

	tcp->tcp_snd_ws_ok = B_FALSE;
	tcp->tcp_snd_ts_ok = B_FALSE;
	tcp->tcp_zero_win_probe = 0;

	tcp->tcp_loopback = 0;
	tcp->tcp_localnet = 0;
	tcp->tcp_syn_defense = 0;
	tcp->tcp_set_timer = 0;

	tcp->tcp_active_open = 0;
	tcp->tcp_rexmit = B_FALSE;
	tcp->tcp_xmit_zc_clean = B_FALSE;

	tcp->tcp_snd_sack_ok = B_FALSE;
	tcp->tcp_hwcksum = B_FALSE;

	DONTCARE(tcp->tcp_maxpsz_multiplier);	/* Init in tcp_init_values */

	tcp->tcp_conn_def_q0 = 0;
	tcp->tcp_ip_forward_progress = B_FALSE;
	tcp->tcp_ecn_ok = B_FALSE;

	tcp->tcp_cwr = B_FALSE;
	tcp->tcp_ecn_echo_on = B_FALSE;
	tcp->tcp_is_wnd_shrnk = B_FALSE;

	TCP_NOTSACK_REMOVE_ALL(tcp->tcp_notsack_list, tcp);
	bzero(&tcp->tcp_sack_info, sizeof (tcp_sack_info_t));

	tcp->tcp_rcv_ws = 0;
	tcp->tcp_snd_ws = 0;
	tcp->tcp_ts_recent = 0;
	tcp->tcp_rnxt = 0;			/* Displayed in mib */
	DONTCARE(tcp->tcp_rwnd);		/* Set in tcp_reinit() */
	tcp->tcp_initial_pmtu = 0;

	ASSERT(tcp->tcp_reass_head == NULL);
	ASSERT(tcp->tcp_reass_tail == NULL);

	tcp->tcp_cwnd_cnt = 0;

	ASSERT(tcp->tcp_rcv_list == NULL);
	ASSERT(tcp->tcp_rcv_last_head == NULL);
	ASSERT(tcp->tcp_rcv_last_tail == NULL);
	ASSERT(tcp->tcp_rcv_cnt == 0);

	DONTCARE(tcp->tcp_cwnd_ssthresh); /* Init in tcp_set_destination */
	DONTCARE(tcp->tcp_cwnd_max);		/* Init in tcp_init_values */
	tcp->tcp_csuna = 0;			/* Displayed in mib */

	tcp->tcp_rto = 0;			/* Displayed in MIB */
	DONTCARE(tcp->tcp_rtt_sa);		/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_rtt_sd);		/* Init in tcp_init_values */
	tcp->tcp_rtt_update = 0;

	DONTCARE(tcp->tcp_swl1); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */
	DONTCARE(tcp->tcp_swl2); /* Init in case TCPS_LISTEN/TCPS_SYN_SENT */

	tcp->tcp_rack = 0;			/* Displayed in mib */
	tcp->tcp_rack_cnt = 0;
	tcp->tcp_rack_cur_max = 0;
	tcp->tcp_rack_abs_max = 0;

	tcp->tcp_max_swnd = 0;

	ASSERT(tcp->tcp_listener == NULL);

	DONTCARE(tcp->tcp_irs);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_iss);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_fss);			/* tcp_valid_bits cleared */
	DONTCARE(tcp->tcp_urg);			/* tcp_valid_bits cleared */

	ASSERT(tcp->tcp_conn_req_cnt_q == 0);
	ASSERT(tcp->tcp_conn_req_cnt_q0 == 0);
	PRESERVE(tcp->tcp_conn_req_max);
	PRESERVE(tcp->tcp_conn_req_seqnum);

	DONTCARE(tcp->tcp_first_timer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_second_timer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_first_ctimer_threshold); /* Init in tcp_init_values */
	DONTCARE(tcp->tcp_second_ctimer_threshold); /* in tcp_init_values */

	DONTCARE(tcp->tcp_urp_last);	/* tcp_urp_last_valid is cleared */
	ASSERT(tcp->tcp_urp_mp == NULL);
	ASSERT(tcp->tcp_urp_mark_mp == NULL);
	ASSERT(tcp->tcp_fused_sigurg_mp == NULL);

	ASSERT(tcp->tcp_eager_next_q == NULL);
	ASSERT(tcp->tcp_eager_last_q == NULL);
	ASSERT((tcp->tcp_eager_next_q0 == NULL &&
	    tcp->tcp_eager_prev_q0 == NULL) ||
	    tcp->tcp_eager_next_q0 == tcp->tcp_eager_prev_q0);
	ASSERT(tcp->tcp_conn.tcp_eager_conn_ind == NULL);

	ASSERT((tcp->tcp_eager_next_drop_q0 == NULL &&
	    tcp->tcp_eager_prev_drop_q0 == NULL) ||
	    tcp->tcp_eager_next_drop_q0 == tcp->tcp_eager_prev_drop_q0);

	DONTCARE(tcp->tcp_ka_rinterval);	/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_ka_abort_thres);	/* Init in tcp_init_values */
	DONTCARE(tcp->tcp_ka_cnt);		/* Init in tcp_init_values */

	tcp->tcp_client_errno = 0;

	DONTCARE(connp->conn_sum);		/* Init in tcp_init_values */

	connp->conn_faddr_v6 = ipv6_all_zeros;	/* Displayed in MIB */

	PRESERVE(connp->conn_bound_addr_v6);
	tcp->tcp_last_sent_len = 0;
	tcp->tcp_dupack_cnt = 0;

	connp->conn_fport = 0;			/* Displayed in MIB */
	PRESERVE(connp->conn_lport);

	PRESERVE(tcp->tcp_acceptor_lockp);

	ASSERT(tcp->tcp_ordrel_mp == NULL);
	PRESERVE(tcp->tcp_acceptor_id);
	DONTCARE(tcp->tcp_ipsec_overhead);

	PRESERVE(connp->conn_family);
	/* Remove any remnants of mapped address binding */
	if (connp->conn_family == AF_INET6) {
		connp->conn_ipversion = IPV6_VERSION;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
	} else {
		connp->conn_ipversion = IPV4_VERSION;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
	}

	connp->conn_bound_if = 0;
	connp->conn_recv_ancillary.crb_all = 0;
	tcp->tcp_recvifindex = 0;
	tcp->tcp_recvhops = 0;
	tcp->tcp_closed = 0;
	if (tcp->tcp_hopopts != NULL) {
		mi_free(tcp->tcp_hopopts);
		tcp->tcp_hopopts = NULL;
		tcp->tcp_hopoptslen = 0;
	}
	ASSERT(tcp->tcp_hopoptslen == 0);
	if (tcp->tcp_dstopts != NULL) {
		mi_free(tcp->tcp_dstopts);
		tcp->tcp_dstopts = NULL;
		tcp->tcp_dstoptslen = 0;
	}
	ASSERT(tcp->tcp_dstoptslen == 0);
	if (tcp->tcp_rthdrdstopts != NULL) {
		mi_free(tcp->tcp_rthdrdstopts);
		tcp->tcp_rthdrdstopts = NULL;
		tcp->tcp_rthdrdstoptslen = 0;
	}
	ASSERT(tcp->tcp_rthdrdstoptslen == 0);
	if (tcp->tcp_rthdr != NULL) {
		mi_free(tcp->tcp_rthdr);
		tcp->tcp_rthdr = NULL;
		tcp->tcp_rthdrlen = 0;
	}
	ASSERT(tcp->tcp_rthdrlen == 0);

	/* Reset fusion-related fields */
	tcp->tcp_fused = B_FALSE;
	tcp->tcp_unfusable = B_FALSE;
	tcp->tcp_fused_sigurg = B_FALSE;
	tcp->tcp_loopback_peer = NULL;

	tcp->tcp_lso = B_FALSE;

	tcp->tcp_in_ack_unsent = 0;
	tcp->tcp_cork = B_FALSE;
	tcp->tcp_tconnind_started = B_FALSE;

	PRESERVE(tcp->tcp_squeue_bytes);

	tcp->tcp_closemp_used = B_FALSE;

	PRESERVE(tcp->tcp_rsrv_mp);
	PRESERVE(tcp->tcp_rsrv_mp_lock);

#ifdef DEBUG
	DONTCARE(tcp->tcmp_stk[0]);
#endif

	PRESERVE(tcp->tcp_connid);

	ASSERT(tcp->tcp_listen_cnt == NULL);
	ASSERT(tcp->tcp_reass_tid == 0);

#undef	DONTCARE
#undef	PRESERVE
}
/*
 * Initialize the various fields in tcp_t.  If parent (the listener) is
 * non-NULL, certain values will be inherited from it.
 */
static void
tcp_init_values(tcp_t *tcp, tcp_t *parent)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	clock_t		rto;

	ASSERT((connp->conn_family == AF_INET &&
	    connp->conn_ipversion == IPV4_VERSION) ||
	    (connp->conn_family == AF_INET6 &&
	    (connp->conn_ipversion == IPV4_VERSION ||
	    connp->conn_ipversion == IPV6_VERSION)));

	if (parent == NULL) {
		tcp->tcp_naglim = tcps->tcps_naglim_def;

		tcp->tcp_rto_initial = tcps->tcps_rexmit_interval_initial;
		tcp->tcp_rto_min = tcps->tcps_rexmit_interval_min;
		tcp->tcp_rto_max = tcps->tcps_rexmit_interval_max;

		tcp->tcp_first_ctimer_threshold =
		    tcps->tcps_ip_notify_cinterval;
		tcp->tcp_second_ctimer_threshold =
		    tcps->tcps_ip_abort_cinterval;
		tcp->tcp_first_timer_threshold = tcps->tcps_ip_notify_interval;
		tcp->tcp_second_timer_threshold = tcps->tcps_ip_abort_interval;

		tcp->tcp_fin_wait_2_flush_interval =
		    tcps->tcps_fin_wait_2_flush_interval;

		tcp->tcp_ka_interval = tcps->tcps_keepalive_interval;
		tcp->tcp_ka_abort_thres = tcps->tcps_keepalive_abort_interval;
		tcp->tcp_ka_cnt = 0;
		tcp->tcp_ka_rinterval = 0;

		/*
		 * Default value of tcp_init_cwnd is 0, so no need to set here
		 * if parent is NULL.  But we need to inherit it from parent.
		 */
	} else {
		/* Inherit various TCP parameters from the parent. */
		tcp->tcp_naglim = parent->tcp_naglim;

		tcp->tcp_rto_initial = parent->tcp_rto_initial;
		tcp->tcp_rto_min = parent->tcp_rto_min;
		tcp->tcp_rto_max = parent->tcp_rto_max;

		tcp->tcp_first_ctimer_threshold =
		    parent->tcp_first_ctimer_threshold;
		tcp->tcp_second_ctimer_threshold =
		    parent->tcp_second_ctimer_threshold;
		tcp->tcp_first_timer_threshold =
		    parent->tcp_first_timer_threshold;
		tcp->tcp_second_timer_threshold =
		    parent->tcp_second_timer_threshold;

		tcp->tcp_fin_wait_2_flush_interval =
		    parent->tcp_fin_wait_2_flush_interval;

		tcp->tcp_ka_interval = parent->tcp_ka_interval;
		tcp->tcp_ka_abort_thres = parent->tcp_ka_abort_thres;
		tcp->tcp_ka_cnt = parent->tcp_ka_cnt;
		tcp->tcp_ka_rinterval = parent->tcp_ka_rinterval;

		tcp->tcp_init_cwnd = parent->tcp_init_cwnd;
	}

	/*
	 * Initialize tcp_rtt_sa and tcp_rtt_sd so that the calculated RTO
	 * will be close to tcp_rexmit_interval_initial.  By doing this, we
	 * allow the algorithm to adjust slowly to large fluctuations of RTT
	 * during first few transmissions of a connection as seen in slow
	 * links.
	 */
	tcp->tcp_rtt_sa = tcp->tcp_rto_initial << 2;
	tcp->tcp_rtt_sd = tcp->tcp_rto_initial >> 1;
	rto = (tcp->tcp_rtt_sa >> 3) + tcp->tcp_rtt_sd +
	    tcps->tcps_rexmit_interval_extra + (tcp->tcp_rtt_sa >> 5) +
	    tcps->tcps_conn_grace_period;
	TCP_SET_RTO(tcp, rto);

	tcp->tcp_timer_backoff = 0;
	tcp->tcp_ms_we_have_waited = 0;
	tcp->tcp_last_recv_time = ddi_get_lbolt();
	tcp->tcp_cwnd_max = tcps->tcps_cwnd_max_;
	tcp->tcp_cwnd_ssthresh = TCP_MAX_LARGEWIN;

	tcp->tcp_maxpsz_multiplier = tcps->tcps_maxpsz_multiplier;

	/* NOTE:  ISS is now set in tcp_set_destination(). */

	/* Reset fusion-related fields */
	tcp->tcp_fused = B_FALSE;
	tcp->tcp_unfusable = B_FALSE;
	tcp->tcp_fused_sigurg = B_FALSE;
	tcp->tcp_loopback_peer = NULL;

	/* We rebuild the header template on the next connect/conn_request */

	/*
	 * Init the window scale to the max so tcp_rwnd_set() won't pare
	 * down tcp_rwnd. tcp_set_destination() will set the right value later.
	 */
	tcp->tcp_rcv_ws = TCP_MAX_WINSHIFT;
	tcp->tcp_rwnd = connp->conn_rcvbuf;

	tcp->tcp_cork = B_FALSE;
	/*
	 * Init the tcp_debug option if it wasn't already set.  This value
	 * determines whether TCP calls strlog() to print out debug messages.
	 * Doing this initialization here means that this value is not
	 * inherited thru tcp_reinit().
	 */
	if (!connp->conn_debug)
		connp->conn_debug = tcps->tcps_dbg;
}
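
/*
 * A worked example of the RTT seeding above: with tcp_rtt_sa set to
 * 4 * tcp_rto_initial and tcp_rtt_sd set to tcp_rto_initial / 2, the
 * classic (sa >> 3) + sd estimate evaluates to exactly tcp_rto_initial,
 * so the first computed RTO comes out to roughly tcp_rto_initial plus
 * the (sa >> 5) term (tcp_rto_initial / 8) plus whatever the stack's
 * rexmit_interval_extra and conn_grace_period tuneables add.
 */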
/*
 * Update the TCP connection according to change of PMTU.
 *
 * Path MTU might have changed by either increase or decrease, so need to
 * adjust the MSS based on the value of ixa_pmtu. No need to handle tiny
 * or negative MSS, since tcp_mss_set() will do it.
 */
void
tcp_update_pmtu(tcp_t *tcp, boolean_t decrease_only)
{
	uint32_t	pmtu;
	int32_t		mss;
	conn_t		*connp = tcp->tcp_connp;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;
	iaflags_t	ixaflags;

	if (tcp->tcp_tcps->tcps_ignore_path_mtu)
		return;

	if (tcp->tcp_state < TCPS_ESTABLISHED)
		return;

	/*
	 * Always call ip_get_pmtu() to make sure that IP has updated
	 * ixa_flags properly.
	 */
	pmtu = ip_get_pmtu(ixa);
	ixaflags = ixa->ixa_flags;

	/*
	 * Calculate the MSS by decreasing the PMTU by conn_ht_iphc_len and
	 * IPsec overhead if applied. Make sure to use the most recent
	 * IPsec information.
	 */
	mss = pmtu - connp->conn_ht_iphc_len - conn_ipsec_length(connp);

	/*
	 * Nothing to change, so just return.
	 */
	if (mss == tcp->tcp_mss)
		return;

	/*
	 * Currently, for ICMP errors, only PMTU decrease is handled.
	 */
	if (mss > tcp->tcp_mss && decrease_only)
		return;

	DTRACE_PROBE2(tcp_update_pmtu, int32_t, tcp->tcp_mss, uint32_t, mss);

	/*
	 * Update ixa_fragsize and ixa_pmtu.
	 */
	ixa->ixa_fragsize = ixa->ixa_pmtu = pmtu;

	/*
	 * Adjust MSS and all relevant variables.
	 */
	tcp_mss_set(tcp, mss);

	/*
	 * If the PMTU is below the min size maintained by IP, then ip_get_pmtu
	 * has set IXAF_PMTU_TOO_SMALL and cleared IXAF_PMTU_IPV4_DF. Since TCP
	 * has a (potentially different) min size we do the same. Make sure to
	 * clear IXAF_DONTFRAG, which is used by IP to decide whether to
	 * fragment the packet.
	 *
	 * LSO over IPv6 can not be fragmented. So need to disable LSO
	 * when IPv6 fragmentation is needed.
	 */
	if (mss < tcp->tcp_tcps->tcps_mss_min)
		ixaflags |= IXAF_PMTU_TOO_SMALL;

	if (ixaflags & IXAF_PMTU_TOO_SMALL)
		ixaflags &= ~(IXAF_DONTFRAG | IXAF_PMTU_IPV4_DF);

	if ((connp->conn_ipversion == IPV4_VERSION) &&
	    !(ixaflags & IXAF_PMTU_IPV4_DF)) {
		tcp->tcp_ipha->ipha_fragment_offset_and_flags = 0;
	}
	ixa->ixa_flags = ixaflags;
}
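
/*
 * For example (assuming a plain IPv4 connection with no IP options and no
 * IPsec overhead): a reported PMTU of 1500 with a 40 byte header template
 * (conn_ht_iphc_len == 20 bytes IP + 20 bytes TCP) yields mss = 1460 above.
 */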
int
tcp_maxpsz_set(tcp_t *tcp, boolean_t set_maxblk)
{
	conn_t	*connp = tcp->tcp_connp;
	queue_t	*q = connp->conn_rq;
	int32_t	mss = tcp->tcp_mss;
	int	maxpsz;

	if (TCP_IS_DETACHED(tcp))
		return (mss);
	if (tcp->tcp_fused) {
		maxpsz = tcp_fuse_maxpsz(tcp);
		mss = INFPSZ;
	} else if (tcp->tcp_maxpsz_multiplier == 0) {
		/*
		 * Set the sd_qn_maxpsz according to the socket send buffer
		 * size, and sd_maxblk to INFPSZ (-1).  This will essentially
		 * instruct the stream head to copyin user data into contiguous
		 * kernel-allocated buffers without breaking it up into smaller
		 * chunks.  We round up the buffer size to the nearest SMSS.
		 */
		maxpsz = MSS_ROUNDUP(connp->conn_sndbuf, mss);
		mss = INFPSZ;
	} else {
		/*
		 * Set sd_qn_maxpsz to approx half the (receivers) buffer
		 * (and a multiple of the mss).  This instructs the stream
		 * head to break down larger than SMSS writes into SMSS-
		 * size mblks, up to tcp_maxpsz_multiplier mblks at a time.
		 */
		maxpsz = tcp->tcp_maxpsz_multiplier * mss;
		if (maxpsz > connp->conn_sndbuf / 2) {
			maxpsz = connp->conn_sndbuf / 2;
			/* Round up to nearest mss */
			maxpsz = MSS_ROUNDUP(maxpsz, mss);
		}
	}

	(void) proto_set_maxpsz(q, connp, maxpsz);
	if (!(IPCL_IS_NONSTR(connp)))
		connp->conn_wq->q_maxpsz = maxpsz;
	if (set_maxblk)
		(void) proto_set_tx_maxblk(q, connp, mss);
	return (mss);
}
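
/*
 * An illustration of the multiplier case above: with
 * tcp_maxpsz_multiplier == 10 and an mss of 1460, maxpsz starts at 14600
 * bytes; if conn_sndbuf were 16384, the sndbuf/2 clamp (8192) would win
 * and then be rounded up to the next mss multiple (8760).
 */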
/* For /dev/tcp aka AF_INET open */
int
tcp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (tcp_open(q, devp, flag, sflag, credp, B_FALSE));
}

/* For /dev/tcp6 aka AF_INET6 open */
int
tcp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (tcp_open(q, devp, flag, sflag, credp, B_TRUE));
}
conn_t *
tcp_create_common(cred_t *credp, boolean_t isv6, boolean_t issocket,
    int *errorp)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp;
	netstack_t	*ns;
	tcp_stack_t	*tcps;
	squeue_t	*sqp;
	zoneid_t	zoneid;

	ASSERT(errorp != NULL);
	/*
	 * Find the proper zoneid and netstack.
	 */
	/*
	 * Special case for install: miniroot needs to be able to
	 * access files via NFS as though it were always in the
	 * global zone.
	 */
	if (credp == kcred && nfs_global_client_only != 0) {
		zoneid = GLOBAL_ZONEID;
		tcps = netstack_find_by_stackid(GLOBAL_NETSTACKID)->
		    netstack_tcp;
		ASSERT(tcps != NULL);
	} else {
		int err;

		if ((err = secpolicy_basic_net_access(credp)) != 0) {
			*errorp = err;
			return (NULL);
		}

		ns = netstack_find_by_cred(credp);
		ASSERT(ns != NULL);
		tcps = ns->netstack_tcp;
		ASSERT(tcps != NULL);

		/*
		 * For exclusive stacks we set the zoneid to zero
		 * to make TCP operate as if in the global zone.
		 */
		if (tcps->tcps_netstack->netstack_stackid !=
		    GLOBAL_NETSTACKID)
			zoneid = GLOBAL_ZONEID;
		else
			zoneid = crgetzoneid(credp);
	}

	sqp = IP_SQUEUE_GET((uint_t)gethrtime());
	connp = (conn_t *)tcp_get_conn(sqp, tcps);
	/*
	 * Both tcp_get_conn and netstack_find_by_cred incremented refcnt,
	 * so we drop it by one.
	 */
	netstack_rele(tcps->tcps_netstack);
	if (connp == NULL) {
		*errorp = ENOSR;
		return (NULL);
	}
	ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto);

	connp->conn_sqp = sqp;
	connp->conn_initial_sqp = connp->conn_sqp;
	connp->conn_ixa->ixa_sqp = connp->conn_sqp;
	tcp = connp->conn_tcp;

	/*
	 * Besides asking IP to set the checksum for us, have conn_ip_output
	 * to do the following checks when necessary:
	 *
	 * IXAF_VERIFY_SOURCE: drop packets when our outer source goes invalid
	 * IXAF_VERIFY_PMTU: verify PMTU changes
	 * IXAF_VERIFY_LSO: verify LSO capability changes
	 */
	connp->conn_ixa->ixa_flags |= IXAF_SET_ULP_CKSUM | IXAF_VERIFY_SOURCE |
	    IXAF_VERIFY_PMTU | IXAF_VERIFY_LSO;

	if (!tcps->tcps_dev_flow_ctl)
		connp->conn_ixa->ixa_flags |= IXAF_NO_DEV_FLOW_CTL;

	if (isv6) {
		connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT;
		connp->conn_ipversion = IPV6_VERSION;
		connp->conn_family = AF_INET6;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv6;
		connp->conn_default_ttl = tcps->tcps_ipv6_hoplimit;
	} else {
		connp->conn_ipversion = IPV4_VERSION;
		connp->conn_family = AF_INET;
		tcp->tcp_mss = tcps->tcps_mss_def_ipv4;
		connp->conn_default_ttl = tcps->tcps_ipv4_ttl;
	}
	connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl;

	crhold(credp);
	connp->conn_cred = credp;
	connp->conn_cpid = curproc->p_pid;
	connp->conn_open_time = ddi_get_lbolt64();

	/* Cache things in the ixa without any refhold */
	ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
	connp->conn_ixa->ixa_cred = credp;
	connp->conn_ixa->ixa_cpid = connp->conn_cpid;

	connp->conn_zoneid = zoneid;
	/* conn_allzones can not be set this early, hence no IPCL_ZONEID */
	connp->conn_ixa->ixa_zoneid = zoneid;
	ASSERT(connp->conn_netstack == tcps->tcps_netstack);
	ASSERT(tcp->tcp_tcps == tcps);

	connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID);

	if (issocket)
		tcp->tcp_issocket = 1;

	connp->conn_rcvbuf = tcps->tcps_recv_hiwat;
	connp->conn_sndbuf = tcps->tcps_xmit_hiwat;
	if (tcps->tcps_snd_lowat_fraction != 0) {
		connp->conn_sndlowat = connp->conn_sndbuf /
		    tcps->tcps_snd_lowat_fraction;
	} else {
		connp->conn_sndlowat = tcps->tcps_xmit_lowat;
	}
	connp->conn_so_type = SOCK_STREAM;
	connp->conn_wroff = connp->conn_ht_iphc_allocated +
	    tcps->tcps_wroff_xtra;

	SOCK_CONNID_INIT(tcp->tcp_connid);
	/* DTrace ignores this - it isn't a tcp:::state-change */
	tcp->tcp_state = TCPS_IDLE;
	tcp_init_values(tcp, NULL);
	return (connp);
}
static int
tcp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
    boolean_t isv6)
{
	tcp_t		*tcp = NULL;
	conn_t		*connp = NULL;
	int		err;
	vmem_t		*minor_arena = NULL;
	dev_t		conn_dev;
	boolean_t	issocket;

	if (q->q_ptr != NULL)
		return (0);

	if (sflag == MODOPEN)
		return (EINVAL);

	if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
	    ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
		minor_arena = ip_minor_arena_la;
	} else {
		/*
		 * Either minor numbers in the large arena were exhausted
		 * or a non socket application is doing the open.
		 * Try to allocate from the small arena.
		 */
		if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) {
			return (EBUSY);
		}
		minor_arena = ip_minor_arena_sa;
	}

	ASSERT(minor_arena != NULL);

	*devp = makedevice(getmajor(*devp), (minor_t)conn_dev);

	if (flag & SO_FALLBACK) {
		/*
		 * Non streams socket needs a stream to fallback to
		 */
		RD(q)->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &tcp_fallback_sock_winit;
		WR(q)->q_ptr = (void *)minor_arena;
		qprocson(q);
		return (0);
	} else if (flag & SO_ACCEPTOR) {
		q->q_qinfo = &tcp_acceptor_rinit;
		/*
		 * the conn_dev and minor_arena will be subsequently used by
		 * tcp_tli_accept() and tcp_tpi_close_accept() to figure out
		 * the minor device number for this connection from the q_ptr.
		 */
		RD(q)->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &tcp_acceptor_winit;
		WR(q)->q_ptr = (void *)minor_arena;
		qprocson(q);
		return (0);
	}

	issocket = flag & SO_SOCKSTR;
	connp = tcp_create_common(credp, isv6, issocket, &err);

	if (connp == NULL) {
		inet_minor_free(minor_arena, conn_dev);
		q->q_ptr = WR(q)->q_ptr = NULL;
		return (err);
	}

	connp->conn_rq = q;
	connp->conn_wq = WR(q);
	q->q_ptr = WR(q)->q_ptr = connp;

	connp->conn_dev = conn_dev;
	connp->conn_minor_arena = minor_arena;

	ASSERT(q->q_qinfo == &tcp_rinitv4 || q->q_qinfo == &tcp_rinitv6);
	ASSERT(WR(q)->q_qinfo == &tcp_winit);

	tcp = connp->conn_tcp;

	if (issocket) {
		WR(q)->q_qinfo = &tcp_sock_winit;
	} else {
#ifdef	_ILP32
		tcp->tcp_acceptor_id = (t_uscalar_t)RD(q);
#else
		tcp->tcp_acceptor_id = conn_dev;
#endif	/* _ILP32 */
		tcp_acceptor_hash_insert(tcp->tcp_acceptor_id, tcp);
	}

	/*
	 * Put the ref for TCP. Ref for IP was already put
	 * by ipcl_conn_create. Also Make the conn_t globally
	 * visible to walkers
	 */
	mutex_enter(&connp->conn_lock);
	CONN_INC_REF_LOCKED(connp);
	ASSERT(connp->conn_ref == 2);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);

	qprocson(q);
	return (0);
}
/*
 * Build/update the tcp header template (in conn_ht_iphc) based on
 * conn_xmit_ipp. The headers include ip6_t, any extension
 * headers, and the maximum size tcp header (to avoid reallocation
 * on the fly for additional tcp options).
 *
 * Assumes the caller has already set conn_{faddr,laddr,fport,lport,flowinfo}.
 * Returns failure if can't allocate memory.
 */
int
tcp_build_hdrs(tcp_t *tcp)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;
	char		buf[TCP_MAX_HDR_LENGTH];
	uint_t		buflen;
	uint_t		ulplen = TCP_MIN_HEADER_LENGTH;
	uint_t		extralen = TCP_MAX_TCP_OPTIONS_LENGTH;
	tcpha_t		*tcpha;
	uint32_t	cksum;
	int		error;

	/*
	 * We might be called after the connection is set up, and we might
	 * have TS options already in the TCP header. Thus we save any
	 * existing tcp header.
	 */
	buflen = connp->conn_ht_ulp_len;
	if (buflen != 0) {
		bcopy(connp->conn_ht_ulp, buf, buflen);
		extralen -= buflen - ulplen;
		ulplen = buflen;
	}

	/* Grab lock to satisfy ASSERT; TCP is serialized using squeue */
	mutex_enter(&connp->conn_lock);
	error = conn_build_hdr_template(connp, ulplen, extralen,
	    &connp->conn_laddr_v6, &connp->conn_faddr_v6, connp->conn_flowinfo);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		return (error);

	/*
	 * Any routing header/option has been massaged. The checksum difference
	 * is stored in conn_sum for later use.
	 */
	tcpha = (tcpha_t *)connp->conn_ht_ulp;
	tcp->tcp_tcpha = tcpha;

	/* restore any old tcp header */
	if (buflen != 0) {
		bcopy(buf, connp->conn_ht_ulp, buflen);
	} else {
		tcpha->tha_sum = 0;
		tcpha->tha_urp = 0;
		tcpha->tha_ack = 0;
		tcpha->tha_offset_and_reserved = (5 << 4);
		tcpha->tha_lport = connp->conn_lport;
		tcpha->tha_fport = connp->conn_fport;
	}

	/*
	 * IP wants our header length in the checksum field to
	 * allow it to perform a single pseudo-header+checksum
	 * calculation on behalf of TCP.
	 * Include the adjustment for a source route once IP_OPTIONS is set.
	 */
	cksum = sizeof (tcpha_t) + connp->conn_sum;
	cksum = (cksum >> 16) + (cksum & 0xFFFF);
	ASSERT(cksum < 0x10000);
	tcpha->tha_sum = htons(cksum);

	if (connp->conn_ipversion == IPV4_VERSION)
		tcp->tcp_ipha = (ipha_t *)connp->conn_ht_iphc;
	else
		tcp->tcp_ip6h = (ip6_t *)connp->conn_ht_iphc;

	if (connp->conn_ht_iphc_allocated + tcps->tcps_wroff_xtra >
	    connp->conn_wroff) {
		connp->conn_wroff = connp->conn_ht_iphc_allocated +
		    tcps->tcps_wroff_xtra;
		(void) proto_set_tx_wroff(connp->conn_rq, connp,
		    connp->conn_wroff);
	}
	return (0);
}
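
/*
 * Note on the checksum seeding above: sizeof (tcpha_t) is 20 (the minimum
 * TCP header), and conn_sum carries the adjustment computed by
 * conn_build_hdr_template() for any source route. The
 * (cksum >> 16) + (cksum & 0xFFFF) step folds a possible carry back into
 * the low 16 bits so the seed fits the 16-bit tha_sum field.
 */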
/*
 * tcp_rwnd_set() is called to adjust the receive window to a desired value.
 * We do not allow the receive window to shrink.  After setting rwnd,
 * set the flow control hiwat of the stream.
 *
 * This function is called in 2 cases:
 *
 * 1) Before data transfer begins, in tcp_input_listener() for accepting a
 *    connection (passive open) and in tcp_input_data() for active connect.
 *    This is called after tcp_mss_set() when the desired MSS value is known.
 *    This makes sure that our window size is a multiple of the other side's
 *    MSS.
 * 2) Handling SO_RCVBUF option.
 *
 * It is ASSUMED that the requested size is a multiple of the current MSS.
 *
 * XXX - Should allow a lower rwnd than tcp_recv_hiwat_minmss * mss if the
 * user requests so.
 */
int
tcp_rwnd_set(tcp_t *tcp, uint32_t rwnd)
{
	uint32_t	mss = tcp->tcp_mss;
	uint32_t	old_max_rwnd;
	uint32_t	max_transmittable_rwnd;
	boolean_t	tcp_detached = TCP_IS_DETACHED(tcp);
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	/*
	 * Insist on a receive window that is at least
	 * tcp_recv_hiwat_minmss * MSS (default 4 * MSS) to avoid
	 * funny TCP interactions of Nagle algorithm, SWS avoidance
	 * and delayed acknowledgement.
	 */
	rwnd = MAX(rwnd, tcps->tcps_recv_hiwat_minmss * mss);

	if (tcp->tcp_fused) {
		size_t sth_hiwat;
		tcp_t *peer_tcp = tcp->tcp_loopback_peer;

		ASSERT(peer_tcp != NULL);
		sth_hiwat = tcp_fuse_set_rcv_hiwat(tcp, rwnd);
		if (!tcp_detached) {
			(void) proto_set_rx_hiwat(connp->conn_rq, connp,
			    sth_hiwat);
			tcp_set_recv_threshold(tcp, sth_hiwat >> 3);
		}

		/* Caller could have changed tcp_rwnd; update tha_win */
		if (tcp->tcp_tcpha != NULL) {
			tcp->tcp_tcpha->tha_win =
			    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
		}
		if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
			tcp->tcp_cwnd_max = rwnd;

		/*
		 * In the fusion case, the maxpsz stream head value of
		 * our peer is set according to its send buffer size
		 * and our receive buffer size; since the latter may
		 * have changed we need to update the peer's maxpsz.
		 */
		(void) tcp_maxpsz_set(peer_tcp, B_TRUE);
		return (sth_hiwat);
	}

	if (tcp_detached)
		old_max_rwnd = tcp->tcp_rwnd;
	else
		old_max_rwnd = connp->conn_rcvbuf;

	/*
	 * If window size info has already been exchanged, TCP should not
	 * shrink the window.  Shrinking window is doable if done carefully.
	 * We may add that support later.  But so far there is not a real
	 * need to do that.
	 */
	if (rwnd < old_max_rwnd && tcp->tcp_state > TCPS_SYN_SENT) {
		/* MSS may have changed, do a round up again. */
		rwnd = MSS_ROUNDUP(old_max_rwnd, mss);
	}

	/*
	 * tcp_rcv_ws starts with TCP_MAX_WINSHIFT so the following check
	 * can be applied even before the window scale option is decided.
	 */
	max_transmittable_rwnd = TCP_MAXWIN << tcp->tcp_rcv_ws;
	if (rwnd > max_transmittable_rwnd) {
		rwnd = max_transmittable_rwnd -
		    (max_transmittable_rwnd % mss);
		if (rwnd < mss)
			rwnd = max_transmittable_rwnd;
		/*
		 * If we're over the limit we may have to back down tcp_rwnd.
		 * The increment below won't work for us. So we set all three
		 * here and the increment below will have no effect.
		 */
		tcp->tcp_rwnd = old_max_rwnd = rwnd;
	}
	if (tcp->tcp_localnet) {
		tcp->tcp_rack_abs_max =
		    MIN(tcps->tcps_local_dacks_max, rwnd / mss / 2);
	} else {
		/*
		 * For a remote host on a different subnet (through a router),
		 * we ack every other packet to be conforming to RFC1122.
		 * tcp_deferred_acks_max is default to 2.
		 */
		tcp->tcp_rack_abs_max =
		    MIN(tcps->tcps_deferred_acks_max, rwnd / mss / 2);
	}
	if (tcp->tcp_rack_cur_max > tcp->tcp_rack_abs_max)
		tcp->tcp_rack_cur_max = tcp->tcp_rack_abs_max;
	else
		tcp->tcp_rack_cur_max = 0;
	/*
	 * Increment the current rwnd by the amount the maximum grew (we
	 * can not overwrite it since we might be in the middle of a
	 * connection.)
	 */
	tcp->tcp_rwnd += rwnd - old_max_rwnd;
	connp->conn_rcvbuf = rwnd;

	/* Are we already connected? */
	if (tcp->tcp_tcpha != NULL) {
		tcp->tcp_tcpha->tha_win =
		    htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
	}

	if ((tcp->tcp_rcv_ws > 0) && rwnd > tcp->tcp_cwnd_max)
		tcp->tcp_cwnd_max = rwnd;

	if (tcp_detached)
		return (rwnd);

	tcp_set_recv_threshold(tcp, rwnd >> 3);

	(void) proto_set_rx_hiwat(connp->conn_rq, connp, rwnd);
	return (rwnd);
}
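
/*
 * An example of the clamping above: with an mss of 1460 and the default
 * tcps_recv_hiwat_minmss of 4, any requested rwnd below 5840 bytes is
 * raised to 5840.  At the other end, rwnd can never exceed
 * TCP_MAXWIN << tcp_rcv_ws, trimmed down to an mss multiple.
 */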
int
tcp_do_unbind(conn_t *connp)
{
	tcp_t	*tcp = connp->conn_tcp;
	int32_t	oldstate;

	switch (tcp->tcp_state) {
	case TCPS_BOUND:
	case TCPS_LISTEN:
		break;
	default:
		return (-TOUTSTATE);
	}

	/*
	 * Need to clean up all the eagers since after the unbind, segments
	 * will no longer be delivered to this listener stream.
	 */
	mutex_enter(&tcp->tcp_eager_lock);
	if (tcp->tcp_conn_req_cnt_q0 != 0 || tcp->tcp_conn_req_cnt_q != 0) {
		tcp_eager_cleanup(tcp, 0);
	}
	mutex_exit(&tcp->tcp_eager_lock);

	/* Clean up the listener connection counter if necessary. */
	if (tcp->tcp_listen_cnt != NULL)
		TCP_DECR_LISTEN_CNT(tcp);
	connp->conn_laddr_v6 = ipv6_all_zeros;
	connp->conn_saddr_v6 = ipv6_all_zeros;
	tcp_bind_hash_remove(tcp);
	oldstate = tcp->tcp_state;
	tcp->tcp_state = TCPS_IDLE;
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, oldstate);

	ip_unbind(connp);
	bzero(&connp->conn_ports, sizeof (connp->conn_ports));

	return (0);
}
/*
 * Collect protocol properties to send to the upper handle.
 */
void
tcp_get_proto_props(tcp_t *tcp, struct sock_proto_props *sopp)
{
	conn_t *connp = tcp->tcp_connp;

	sopp->sopp_flags = SOCKOPT_RCVHIWAT | SOCKOPT_MAXBLK | SOCKOPT_WROFF;
	sopp->sopp_maxblk = tcp_maxpsz_set(tcp, B_FALSE);

	sopp->sopp_rxhiwat = tcp->tcp_fused ?
	    tcp_fuse_set_rcv_hiwat(tcp, connp->conn_rcvbuf) :
	    connp->conn_rcvbuf;
	/*
	 * Determine what write offset value to use depending on SACK and
	 * whether the endpoint is fused or not.
	 */
	if (tcp->tcp_fused) {
		ASSERT(tcp->tcp_loopback);
		ASSERT(tcp->tcp_loopback_peer != NULL);
		/*
		 * For fused tcp loopback, set the stream head's write
		 * offset value to zero since we won't be needing any room
		 * for TCP/IP headers. This would also improve performance
		 * since it would reduce the amount of work done by kmem.
		 * Non-fused tcp loopback case is handled separately below.
		 */
		sopp->sopp_wroff = 0;
		/*
		 * Update the peer's transmit parameters according to
		 * our recently calculated high water mark value.
		 */
		(void) tcp_maxpsz_set(tcp->tcp_loopback_peer, B_TRUE);
	} else if (tcp->tcp_snd_sack_ok) {
		sopp->sopp_wroff = connp->conn_ht_iphc_allocated +
		    (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
	} else {
		sopp->sopp_wroff = connp->conn_ht_iphc_len +
		    (tcp->tcp_loopback ? 0 : tcp->tcp_tcps->tcps_wroff_xtra);
	}

	if (tcp->tcp_loopback) {
		sopp->sopp_flags |= SOCKOPT_LOOPBACK;
		sopp->sopp_loopback = B_TRUE;
	}
}
/*
 * Check the usability of ZEROCOPY. It's instead checking the flag set by IP.
 */
boolean_t
tcp_zcopy_check(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	ip_xmit_attr_t	*ixa = connp->conn_ixa;
	boolean_t	zc_enabled = B_FALSE;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (do_tcpzcopy == 2)
		zc_enabled = B_TRUE;
	else if ((do_tcpzcopy == 1) && (ixa->ixa_flags & IXAF_ZCOPY_CAPAB))
		zc_enabled = B_TRUE;

	tcp->tcp_snd_zcopy_on = zc_enabled;
	if (!TCP_IS_DETACHED(tcp)) {
		if (zc_enabled) {
			ixa->ixa_flags |= IXAF_VERIFY_ZCOPY;
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMSAFE);
			TCP_STAT(tcps, tcp_zcopy_on);
		} else {
			ixa->ixa_flags &= ~IXAF_VERIFY_ZCOPY;
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMUNSAFE);
			TCP_STAT(tcps, tcp_zcopy_off);
		}
	}
	return (zc_enabled);
}
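
/*
 * As the checks above use it, do_tcpzcopy acts as a three-way tuneable:
 * 2 forces zero-copy on unconditionally, 1 enables it only when IP has
 * set IXAF_ZCOPY_CAPAB for the path, and any other value (e.g. 0) leaves
 * it off.
 */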
/*
 * Backoff from a zero-copy message by copying data to a newly allocated
 * message and freeing the original desballoca'ed segmapped message.
 *
 * This function is called by following two callers:
 * 1. tcp_timer: fix_xmitlist is set to B_TRUE, because it's safe to free
 *    the original desballoca'ed message and notify sockfs. This is in re-
 *    transmit state.
 * 2. tcp_output: fix_xmitlist is set to B_FALSE. Flag STRUIO_ZCNOTIFY need
 *    to be copied to new message.
 */
mblk_t *
tcp_zcopy_backoff(tcp_t *tcp, mblk_t *bp, boolean_t fix_xmitlist)
{
	mblk_t		*nbp;
	mblk_t		*head = NULL;
	mblk_t		*tail = NULL;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	ASSERT(bp != NULL);
	while (bp != NULL) {
		if (IS_VMLOANED_MBLK(bp)) {
			TCP_STAT(tcps, tcp_zcopy_backoff);
			if ((nbp = copyb(bp)) == NULL) {
				tcp->tcp_xmit_zc_clean = B_FALSE;
				if (tail != NULL)
					tail->b_cont = bp;
				return ((head == NULL) ? bp : head);
			}

			if (bp->b_datap->db_struioflag & STRUIO_ZCNOTIFY) {
				if (fix_xmitlist)
					tcp_zcopy_notify(tcp);
				else
					nbp->b_datap->db_struioflag |=
					    STRUIO_ZCNOTIFY;
			}
			nbp->b_cont = bp->b_cont;

			/*
			 * Copy saved information and adjust tcp_xmit_tail
			 * if needed.
			 */
			if (fix_xmitlist) {
				nbp->b_prev = bp->b_prev;
				nbp->b_next = bp->b_next;

				if (tcp->tcp_xmit_tail == bp)
					tcp->tcp_xmit_tail = nbp;
			}

			/* Free the original message. */
			bp->b_prev = NULL;
			bp->b_next = NULL;
			freeb(bp);

			bp = nbp;
		}

		if (head == NULL) {
			head = bp;
		}
		if (tail == NULL) {
			tail = bp;
		} else {
			tail->b_cont = bp;
			tail = bp;
		}

		/* Move forward. */
		bp = bp->b_cont;
	}

	if (fix_xmitlist) {
		tcp->tcp_xmit_last = tail;
		tcp->tcp_xmit_zc_clean = B_TRUE;
	}

	return (head);
}
void
tcp_zcopy_notify(tcp_t *tcp)
{
	struct stdata	*stp;
	conn_t		*connp;

	if (tcp->tcp_detached)
		return;
	connp = tcp->tcp_connp;
	if (IPCL_IS_NONSTR(connp)) {
		(*connp->conn_upcalls->su_zcopy_notify)
		    (connp->conn_upper_handle);
		return;
	}
	stp = STREAM(connp->conn_rq);
	mutex_enter(&stp->sd_lock);
	stp->sd_flag |= STZCNOTIFY;
	cv_broadcast(&stp->sd_zcopy_wait);
	mutex_exit(&stp->sd_lock);
}
/*
 * Update the TCP connection according to change of LSO capability.
 */
static void
tcp_update_lso(tcp_t *tcp, ip_xmit_attr_t *ixa)
{
	/*
	 * We check against IPv4 header length to preserve the old behavior
	 * of only enabling LSO when there are no IP options.
	 * But this restriction might not be necessary at all. Before removing
	 * it, need to verify how LSO is handled for source routing case, with
	 * which IP does software checksum.
	 *
	 * For IPv6, whenever any extension header is needed, LSO is suppressed.
	 */
	if (ixa->ixa_ip_hdr_length != ((ixa->ixa_flags & IXAF_IS_IPV4) ?
	    IP_SIMPLE_HDR_LENGTH : IPV6_HDR_LEN))
		return;

	/*
	 * Either the LSO capability newly became usable, or it has changed.
	 */
	if (ixa->ixa_flags & IXAF_LSO_CAPAB) {
		ill_lso_capab_t	*lsoc = &ixa->ixa_lso_capab;

		ASSERT(lsoc->ill_lso_max > 0);
		tcp->tcp_lso_max = MIN(TCP_MAX_LSO_LENGTH, lsoc->ill_lso_max);

		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
		    boolean_t, B_TRUE, uint32_t, tcp->tcp_lso_max);

		/*
		 * If LSO to be enabled, notify the STREAM header with larger
		 * data block.
		 */
		if (!tcp->tcp_lso)
			tcp->tcp_maxpsz_multiplier = 0;

		tcp->tcp_lso = B_TRUE;
		TCP_STAT(tcp->tcp_tcps, tcp_lso_enabled);
	} else { /* LSO capability is not usable any more. */
		DTRACE_PROBE3(tcp_update_lso, boolean_t, tcp->tcp_lso,
		    boolean_t, B_FALSE, uint32_t, tcp->tcp_lso_max);

		/*
		 * If LSO to be disabled, notify the STREAM header with smaller
		 * data block. And need to restore fragsize to PMTU.
		 */
		if (tcp->tcp_lso)
			tcp->tcp_maxpsz_multiplier =
			    tcp->tcp_tcps->tcps_maxpsz_multiplier;
		ixa->ixa_fragsize = ixa->ixa_pmtu;
		tcp->tcp_lso = B_FALSE;
		TCP_STAT(tcp->tcp_tcps, tcp_lso_disabled);
	}

	(void) tcp_maxpsz_set(tcp, B_TRUE);
}
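
/*
 * Setting tcp_maxpsz_multiplier to 0 above makes the subsequent
 * tcp_maxpsz_set() call program the stream head for large contiguous
 * copyin (sd_maxblk set to INFPSZ), which is what lets LSO transmits be
 * built from big mblks rather than mss-sized chunks.
 */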
/*
 * Update the TCP connection according to change of ZEROCOPY capability.
 */
static void
tcp_update_zcopy(tcp_t *tcp)
{
	conn_t		*connp = tcp->tcp_connp;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	if (tcp->tcp_snd_zcopy_on) {
		tcp->tcp_snd_zcopy_on = B_FALSE;
		if (!TCP_IS_DETACHED(tcp)) {
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMUNSAFE);
			TCP_STAT(tcps, tcp_zcopy_off);
		}
	} else {
		tcp->tcp_snd_zcopy_on = B_TRUE;
		if (!TCP_IS_DETACHED(tcp)) {
			(void) proto_set_tx_copyopt(connp->conn_rq, connp,
			    ZCVMSAFE);
			TCP_STAT(tcps, tcp_zcopy_on);
		}
	}
}
/*
 * Notify function registered with ip_xmit_attr_t. It's called in the squeue
 * so it's safe to update the TCP connection.
 */
/* ARGSUSED1 */
static void
tcp_notify(void *arg, ip_xmit_attr_t *ixa, ixa_notify_type_t ntype,
    ixa_notify_arg_t narg)
{
	tcp_t		*tcp = (tcp_t *)arg;
	conn_t		*connp = tcp->tcp_connp;

	switch (ntype) {
	case IXAN_LSO:
		tcp_update_lso(tcp, connp->conn_ixa);
		break;
	case IXAN_PMTU:
		tcp_update_pmtu(tcp, B_FALSE);
		break;
	case IXAN_ZCOPY:
		tcp_update_zcopy(tcp);
		break;
	default:
		break;
	}
}
/*
 * The TCP write service routine should never be called...
 */
/* ARGSUSED */
static void
tcp_wsrv(queue_t *q)
{
	tcp_stack_t	*tcps = Q_TO_TCP(q)->tcp_tcps;

	TCP_STAT(tcps, tcp_wsrv_called);
}
/*
 * Hash list lookup routine for tcp_t structures.
 * Returns with a CONN_INC_REF tcp structure. Caller must do a CONN_DEC_REF.
 */
tcp_t *
tcp_acceptor_hash_lookup(t_uscalar_t id, tcp_stack_t *tcps)
{
	tf_t	*tf;
	tcp_t	*tcp;

	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];
	mutex_enter(&tf->tf_lock);
	for (tcp = tf->tf_tcp; tcp != NULL;
	    tcp = tcp->tcp_acceptor_hash) {
		if (tcp->tcp_acceptor_id == id) {
			CONN_INC_REF(tcp->tcp_connp);
			mutex_exit(&tf->tf_lock);
			return (tcp);
		}
	}
	mutex_exit(&tf->tf_lock);
	return (NULL);
}

/*
 * Hash list insertion routine for tcp_t structures.
 */
void
tcp_acceptor_hash_insert(t_uscalar_t id, tcp_t *tcp)
{
	tf_t	*tf;
	tcp_t	**tcpp;
	tcp_t	*tcpnext;
	tcp_stack_t	*tcps = tcp->tcp_tcps;

	tf = &tcps->tcps_acceptor_fanout[TCP_ACCEPTOR_HASH(id)];

	if (tcp->tcp_ptpahn != NULL)
		tcp_acceptor_hash_remove(tcp);
	tcpp = &tf->tf_tcp;
	mutex_enter(&tf->tf_lock);
	tcpnext = tcpp[0];
	if (tcpnext)
		tcpnext->tcp_ptpahn = &tcp->tcp_acceptor_hash;
	tcp->tcp_acceptor_hash = tcpnext;
	tcp->tcp_ptpahn = tcpp;
	tcpp[0] = tcp;
	tcp->tcp_acceptor_lockp = &tf->tf_lock; /* For tcp_*_hash_remove */
	mutex_exit(&tf->tf_lock);
}

/*
 * Hash list removal routine for tcp_t structures.
 */
void
tcp_acceptor_hash_remove(tcp_t *tcp)
{
	tcp_t	*tcpnext;
	kmutex_t *lockp;

	/*
	 * Extract the lock pointer in case there are concurrent
	 * hash_remove's for this instance.
	 */
	lockp = tcp->tcp_acceptor_lockp;

	if (tcp->tcp_ptpahn == NULL)
		return;

	ASSERT(lockp != NULL);
	mutex_enter(lockp);
	if (tcp->tcp_ptpahn) {
		tcpnext = tcp->tcp_acceptor_hash;
		if (tcpnext) {
			tcpnext->tcp_ptpahn = tcp->tcp_ptpahn;
			tcp->tcp_acceptor_hash = NULL;
		}
		*tcp->tcp_ptpahn = tcpnext;
		tcp->tcp_ptpahn = NULL;
	}
	mutex_exit(lockp);
	tcp->tcp_acceptor_lockp = NULL;
}
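
/*
 * A note on the structure above: each fanout bucket is a singly linked
 * list threaded through tcp_acceptor_hash, and tcp_ptpahn points back at
 * whatever pointer currently names this tcp_t (either the bucket head or
 * the previous entry's tcp_acceptor_hash). That back-pointer is what lets
 * tcp_acceptor_hash_remove() unlink an entry in O(1) without rewalking
 * the bucket.
 */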
/*
 * Type three generator adapted from the random() function in 4.4 BSD:
 */

/*
 * Copyright (c) 1983, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
3526 tcp_ddi_g_init(void)
3528 tcp_timercache
= kmem_cache_create("tcp_timercache",
3529 sizeof (tcp_timer_t
) + sizeof (mblk_t
), 0,
3530 NULL
, NULL
, NULL
, NULL
, NULL
, 0);
3532 tcp_notsack_blk_cache
= kmem_cache_create("tcp_notsack_blk_cache",
3533 sizeof (notsack_blk_t
), 0, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
3535 /* A single callback independently of how many netstacks we have */
3536 ip_squeue_init(tcp_squeue_add
);
3538 tcp_g_kstat
= tcp_g_kstat_init(&tcp_g_statistics
);
3540 tcp_squeue_flag
= tcp_squeue_switch(tcp_squeue_wput
);
3543 * We want to be informed each time a stack is created or
3544 * destroyed in the kernel, so we can maintain the
3545 * set of tcp_stack_t's.
3547 netstack_register(NS_TCP
, tcp_stack_init
, NULL
, tcp_stack_fini
);
3551 #define INET_NAME "ip"
/*
 * Initialize the TCP stack instance.
 */
static void *
tcp_stack_init(netstackid_t stackid, netstack_t *ns)
{
	tcp_stack_t	*tcps;
	int		i;
	int		error = 0;
	major_t		major;
	size_t		arrsz;
	uint8_t		secret[16];

	tcps = (tcp_stack_t *)kmem_zalloc(sizeof (*tcps), KM_SLEEP);
	tcps->tcps_netstack = ns;

	/* Initialize locks */
	mutex_init(&tcps->tcps_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL);

	tcps->tcps_g_num_epriv_ports = TCP_NUM_EPRIV_PORTS;
	tcps->tcps_g_epriv_ports[0] = ULP_DEF_EPRIV_PORT1;
	tcps->tcps_g_epriv_ports[1] = ULP_DEF_EPRIV_PORT2;
	tcps->tcps_min_anonpriv_port = 512;

	tcps->tcps_bind_fanout = kmem_zalloc(sizeof (tf_t) *
	    TCP_BIND_FANOUT_SIZE, KM_SLEEP);
	tcps->tcps_acceptor_fanout = kmem_zalloc(sizeof (tf_t) *
	    TCP_ACCEPTOR_FANOUT_SIZE, KM_SLEEP);

	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
		mutex_init(&tcps->tcps_bind_fanout[i].tf_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
		mutex_init(&tcps->tcps_acceptor_fanout[i].tf_lock, NULL,
		    MUTEX_DEFAULT, NULL);
	}

	/* TCP's IPsec code calls the packet dropper. */
	ip_drop_register(&tcps->tcps_dropper, "TCP IPsec policy enforcement");

	arrsz = tcp_propinfo_count * sizeof (mod_prop_info_t);
	tcps->tcps_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz,
	    KM_SLEEP);
	bcopy(tcp_propinfo_tbl, tcps->tcps_propinfo_tbl, arrsz);

	/*
	 * Note: To really walk the device tree you need the devinfo
	 * pointer to your device which is only available after probe/attach.
	 * The following is safe only because it uses ddi_root_node()
	 */
	tcp_max_optsize = optcom_max_optsize(tcp_opt_obj.odb_opt_des_arr,
	    tcp_opt_obj.odb_opt_arr_cnt);

	/* Initialize the RFC 6528 ISS. */
	random_get_pseudo_bytes(secret, sizeof (secret));
	MD5Init(&tcps->tcps_iss_key);
	MD5Update(&tcps->tcps_iss_key, secret, sizeof (secret));

	tcps->tcps_kstat = tcp_kstat2_init(stackid);
	tcps->tcps_mibkp = tcp_kstat_init(stackid);

	major = mod_name_to_major(INET_NAME);
	error = ldi_ident_from_major(major, &tcps->tcps_ldi_ident);
	ASSERT(error == 0);
	tcps->tcps_ixa_cleanup_mp = allocb_wait(0, BPRI_MED, STR_NOSIG, NULL);
	ASSERT(tcps->tcps_ixa_cleanup_mp != NULL);
	cv_init(&tcps->tcps_ixa_cleanup_ready_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tcps->tcps_ixa_cleanup_done_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&tcps->tcps_ixa_cleanup_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&tcps->tcps_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
	tcps->tcps_reclaim = B_FALSE;
	tcps->tcps_reclaim_tid = 0;
	tcps->tcps_reclaim_period = tcps->tcps_rexmit_interval_max;

	/*
	 * ncpus is the current number of CPUs, which can be bigger than
	 * boot_ncpus.  But we don't want to use ncpus to allocate all the
	 * tcp_stats_cpu_t at system boot up time since it will be 1.  While
	 * we handle adding CPU in tcp_cpu_update(), it will be slow if
	 * there are many CPUs as we will be adding them 1 by 1.
	 *
	 * Note that tcps_sc_cnt never decreases and the tcps_sc[x] pointers
	 * are not freed until the stack is going away.  So there is no need
	 * to grab a lock to access the per CPU tcps_sc[x] pointer.
	 */
	mutex_enter(&cpu_lock);
	tcps->tcps_sc_cnt = MAX(ncpus, boot_ncpus);
	mutex_exit(&cpu_lock);
	tcps->tcps_sc = kmem_zalloc(max_ncpus * sizeof (tcp_stats_cpu_t *),
	    KM_SLEEP);
	for (i = 0; i < tcps->tcps_sc_cnt; i++) {
		tcps->tcps_sc[i] = kmem_zalloc(sizeof (tcp_stats_cpu_t),
		    KM_SLEEP);
	}

	mutex_init(&tcps->tcps_listener_conf_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&tcps->tcps_listener_conf, sizeof (tcp_listener_t),
	    offsetof(tcp_listener_t, tl_link));

	return (tcps);
}
/*
 * Called when the IP module is about to be unloaded.
 */
void
tcp_ddi_g_destroy(void)
{
	tcp_g_kstat_fini(tcp_g_kstat);
	tcp_g_kstat = NULL;
	bzero(&tcp_g_statistics, sizeof (tcp_g_statistics));

	kmem_cache_destroy(tcp_timercache);
	kmem_cache_destroy(tcp_notsack_blk_cache);

	netstack_unregister(NS_TCP);
}
/*
 * Free the TCP stack instance.
 */
static void
tcp_stack_fini(netstackid_t stackid, void *arg)
{
	tcp_stack_t *tcps = (tcp_stack_t *)arg;
	int i;

	freeb(tcps->tcps_ixa_cleanup_mp);
	tcps->tcps_ixa_cleanup_mp = NULL;
	cv_destroy(&tcps->tcps_ixa_cleanup_ready_cv);
	cv_destroy(&tcps->tcps_ixa_cleanup_done_cv);
	mutex_destroy(&tcps->tcps_ixa_cleanup_lock);

	/*
	 * Setting tcps_reclaim to false tells tcp_reclaim_timer() not to
	 * restart the timer.
	 */
	mutex_enter(&tcps->tcps_reclaim_lock);
	tcps->tcps_reclaim = B_FALSE;
	mutex_exit(&tcps->tcps_reclaim_lock);
	if (tcps->tcps_reclaim_tid != 0)
		(void) untimeout(tcps->tcps_reclaim_tid);
	mutex_destroy(&tcps->tcps_reclaim_lock);

	tcp_listener_conf_cleanup(tcps);

	for (i = 0; i < tcps->tcps_sc_cnt; i++)
		kmem_free(tcps->tcps_sc[i], sizeof (tcp_stats_cpu_t));
	kmem_free(tcps->tcps_sc, max_ncpus * sizeof (tcp_stats_cpu_t *));

	kmem_free(tcps->tcps_propinfo_tbl,
	    tcp_propinfo_count * sizeof (mod_prop_info_t));
	tcps->tcps_propinfo_tbl = NULL;

	for (i = 0; i < TCP_BIND_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_bind_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_bind_fanout[i].tf_lock);
	}

	for (i = 0; i < TCP_ACCEPTOR_FANOUT_SIZE; i++) {
		ASSERT(tcps->tcps_acceptor_fanout[i].tf_tcp == NULL);
		mutex_destroy(&tcps->tcps_acceptor_fanout[i].tf_lock);
	}

	kmem_free(tcps->tcps_bind_fanout, sizeof (tf_t) * TCP_BIND_FANOUT_SIZE);
	tcps->tcps_bind_fanout = NULL;

	kmem_free(tcps->tcps_acceptor_fanout, sizeof (tf_t) *
	    TCP_ACCEPTOR_FANOUT_SIZE);
	tcps->tcps_acceptor_fanout = NULL;

	mutex_destroy(&tcps->tcps_epriv_port_lock);

	ip_drop_unregister(&tcps->tcps_dropper);

	tcp_kstat2_fini(stackid, tcps->tcps_kstat);
	tcps->tcps_kstat = NULL;

	tcp_kstat_fini(stackid, tcps->tcps_mibkp);
	tcps->tcps_mibkp = NULL;

	ldi_ident_release(tcps->tcps_ldi_ident);
	kmem_free(tcps, sizeof (*tcps));
}
static void
tcp_iss_init(tcp_t *tcp)
{
	MD5_CTX context;
	struct { uint32_t ports; in6_addr_t src; in6_addr_t dst; } arg;
	uint32_t answer[4];
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	/* tcp_iss may already have been set in tcp_input_listener */
	if (!tcp->tcp_iss) {
		tcp->tcp_iss = gethrtime() >> ISS_NSEC_SHT;
		context = tcps->tcps_iss_key;
		arg.ports = connp->conn_ports;
		arg.src = connp->conn_laddr_v6;
		arg.dst = connp->conn_faddr_v6;
		MD5Update(&context, (uchar_t *)&arg, sizeof (arg));
		MD5Final((uchar_t *)answer, &context);
		tcp->tcp_iss += answer[0] ^ answer[1] ^ answer[2] ^ answer[3];
	}
	tcp->tcp_valid_bits = TCP_ISS_VALID;
	tcp->tcp_fss = tcp->tcp_iss - 1;
	tcp->tcp_suna = tcp->tcp_iss;
	tcp->tcp_snxt = tcp->tcp_iss + 1;
	tcp->tcp_rexmit_nxt = tcp->tcp_snxt;
	tcp->tcp_csuna = tcp->tcp_snxt;
}
/*
 * tcp_{set,clr}qfull() functions are used to either set or clear QFULL
 * on the specified backing STREAMS q. Note, the caller may make the
 * decision to call based on the tcp_t.tcp_flow_stopped value which
 * when checked outside the q's lock is only an advisory check ...
 */
void
tcp_setqfull(tcp_t *tcp)
{
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	conn_t		*connp = tcp->tcp_connp;

	if (tcp->tcp_closed)
		return;

	conn_setqfull(connp, &tcp->tcp_flow_stopped);
	if (tcp->tcp_flow_stopped)
		TCP_STAT(tcps, tcp_flwctl_on);
}

void
tcp_clrqfull(tcp_t *tcp)
{
	conn_t	*connp = tcp->tcp_connp;

	if (tcp->tcp_closed)
		return;
	conn_clrqfull(connp, &tcp->tcp_flow_stopped);
}
static int
tcp_squeue_switch(int val)
{
	int rval = SQ_FILL;

	switch (val) {
	case 1:
		rval = SQ_NODRAIN;
		break;
	case 2:
		rval = SQ_PROCESS;
		break;
	default:
		break;
	}
	return (rval);
}
/*
 * This is called once for each squeue - globally for all stack
 * instances.
 */
static void
tcp_squeue_add(squeue_t *sqp)
{
	tcp_squeue_priv_t *tcp_time_wait = kmem_zalloc(
	    sizeof (tcp_squeue_priv_t), KM_SLEEP);

	*squeue_getprivate(sqp, SQPRIVATE_TCP) = (intptr_t)tcp_time_wait;
	if (tcp_free_list_max_cnt == 0) {
		int tcp_ncpus = ((boot_max_ncpus == -1) ?
		    max_ncpus : boot_max_ncpus);

		/*
		 * Limit number of entries to 1% of available memory / tcp_ncpus
		 */
		tcp_free_list_max_cnt = (freemem * PAGESIZE) /
		    (tcp_ncpus * sizeof (tcp_t) * 100);
	}
	tcp_time_wait->tcp_free_list_cnt = 0;
}
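
/*
 * To make the cap above concrete: it budgets 1% of free memory
 * (freemem pages times PAGESIZE bytes) for cached tcp_t's, divides that
 * budget evenly across tcp_ncpus, and expresses the per-CPU share as a
 * count of sizeof (tcp_t) sized entries.
 */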
/*
 * Return a unix error if the TLI error is TSYSERR; otherwise return a
 * negative TLI error.
 */
int
tcp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr,
    boolean_t bind_to_req_port_only)
{
	int		error;
	tcp_t		*tcp = connp->conn_tcp;

	if (tcp->tcp_state >= TCPS_BOUND) {
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_bind: bad state, %d", tcp->tcp_state);
		}
		return (-TOUTSTATE);
	}

	error = tcp_bind_check(connp, sa, len, cr, bind_to_req_port_only);
	if (error != 0)
		return (error);

	ASSERT(tcp->tcp_state == TCPS_BOUND);
	tcp->tcp_conn_req_max = 0;
	return (0);
}
3869 * If the return value from this function is positive, it's a UNIX error.
3870 * Otherwise, if it's negative, then the absolute value is a TLI error.
3871 * the TPI routine tcp_tpi_connect() is a wrapper function for this.
3874 tcp_do_connect(conn_t
*connp
, const struct sockaddr
*sa
, socklen_t len
,
3875 cred_t
*cr
, pid_t pid
)
3877 tcp_t
*tcp
= connp
->conn_tcp
;
3878 sin_t
*sin
= (sin_t
*)sa
;
3879 sin6_t
*sin6
= (sin6_t
*)sa
;
3886 tcp_stack_t
*tcps
= tcp
->tcp_tcps
;
3888 ip_xmit_attr_t
*ixa
= connp
->conn_ixa
;
3890 oldstate
= tcp
->tcp_state
;
3895 * Should never happen
3899 case sizeof (sin_t
):
3901 if (sin
->sin_port
== 0) {
3904 if (connp
->conn_ipv6_v6only
) {
3905 return (EAFNOSUPPORT
);
3909 case sizeof (sin6_t
):
3910 sin6
= (sin6_t
*)sa
;
3911 if (sin6
->sin6_port
== 0) {
3917 * If we're connecting to an IPv4-mapped IPv6 address, we need to
3918 * make sure that the conn_ipversion is IPV4_VERSION. We
3919 * need to this before we call tcp_bindi() so that the port lookup
3920 * code will look for ports in the correct port space (IPv4 and
3921 * IPv6 have separate port spaces).
3923 if (connp
->conn_family
== AF_INET6
&&
3924 connp
->conn_ipversion
== IPV6_VERSION
&&
3925 IN6_IS_ADDR_V4MAPPED(&sin6
->sin6_addr
)) {
3926 if (connp
->conn_ipv6_v6only
)
3927 return (EADDRNOTAVAIL
);
3929 connp
->conn_ipversion
= IPV4_VERSION
;
3932 switch (tcp
->tcp_state
) {
3935 * Listening sockets are not allowed to issue connect().
3937 if (IPCL_IS_NONSTR(connp
))
3938 return (EOPNOTSUPP
);
3942 * We support quick connect, refer to comments in
3949 return (-TOUTSTATE
);
	/*
	 * We update our cred/cpid based on the caller of connect
	 */
	if (connp->conn_cred != cr) {
		crhold(cr);
		crfree(connp->conn_cred);
		connp->conn_cred = cr;
	}
	connp->conn_cpid = pid;

	/* Cache things in the ixa without any refhold */
	ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
	ixa->ixa_cred = cr;
	ixa->ixa_cpid = pid;
	if (connp->conn_family == AF_INET6) {
		if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			error = tcp_connect_ipv6(tcp, &sin6->sin6_addr,
			    sin6->sin6_port, sin6->sin6_flowinfo,
			    sin6->__sin6_src_id, sin6->sin6_scope_id);
		} else {
			/*
			 * Destination address is a mapped IPv6 address.
			 * Source bound address should be unspecified or
			 * an IPv6 mapped address as well.
			 */
			if (!IN6_IS_ADDR_UNSPECIFIED(
			    &connp->conn_bound_addr_v6) &&
			    !IN6_IS_ADDR_V4MAPPED(&connp->conn_bound_addr_v6)) {
				return (EADDRNOTAVAIL);
			}
			dstaddrp = &V4_PART_OF_V6((sin6->sin6_addr));
			dstport = sin6->sin6_port;
			srcid = sin6->__sin6_src_id;
			error = tcp_connect_ipv4(tcp, dstaddrp, dstport,
			    srcid);
		}
	} else {
		dstaddrp = &sin->sin_addr.s_addr;
		dstport = sin->sin_port;
		srcid = 0;
		error = tcp_connect_ipv4(tcp, dstaddrp, dstport, srcid);
	}

	if (error != 0)
		goto connect_failed;
	/* connect succeeded */
	TCPS_BUMP_MIB(tcps, tcpActiveOpens);
	tcp->tcp_active_open = 1;

	/*
	 * tcp_set_destination() does not adjust for TCP/IP header length.
	 */
	mss = tcp->tcp_mss - connp->conn_ht_iphc_len;

	/*
	 * Make sure our rwnd is at least recv_hiwat_minmss * MSS large,
	 * and round it up to the nearest MSS.
	 *
	 * We do the round up here because we need to get the interface MTU
	 * first before we can do the round up.
	 */
	tcp->tcp_rwnd = connp->conn_rcvbuf;
	tcp->tcp_rwnd = MAX(MSS_ROUNDUP(tcp->tcp_rwnd, mss),
	    tcps->tcps_recv_hiwat_minmss * mss);
	connp->conn_rcvbuf = tcp->tcp_rwnd;
	tcp_set_ws_value(tcp);
	tcp->tcp_tcpha->tha_win = htons(tcp->tcp_rwnd >> tcp->tcp_rcv_ws);
	if (tcp->tcp_rcv_ws > 0 || tcps->tcps_wscale_always)
		tcp->tcp_snd_ws_ok = B_TRUE;
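
	/*
	 * Worked example (numbers illustrative): with conn_rcvbuf = 1 MB
	 * and mss = 1460, MSS_ROUNDUP(1048576, 1460) = 1049740. That does
	 * not fit the 16-bit window field, so tcp_set_ws_value() ends up
	 * with a receive window scale of 5 (1049740 >> 5 = 32804 <= 65535),
	 * and 32804 is the value advertised in the SYN's tha_win.
	 */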
	/*
	 * Set tcp_snd_ts_ok to true so that tcp_xmit_mp will include the
	 * timestamp option in the SYN segment.
	 */
	if (tcps->tcps_tstamp_always ||
	    (tcp->tcp_rcv_ws && tcps->tcps_tstamp_if_wscale)) {
		tcp->tcp_snd_ts_ok = B_TRUE;
	}
	/*
	 * Note that tcp_snd_sack_ok can be set in tcp_set_destination() if
	 * the SACK metric is set.  So here we just check the per stack SACK
	 * permitted param.
	 */
	if (tcps->tcps_sack_permitted == 2) {
		ASSERT(tcp->tcp_num_sack_blk == 0);
		ASSERT(tcp->tcp_notsack_list == NULL);
		tcp->tcp_snd_sack_ok = B_TRUE;
	}
	/*
	 * Should we use ECN?  Note that the current default value
	 * (SunOS 5.9) of tcp_ecn_permitted is 1.  The reason for this
	 * is that there is equipment out there that will drop ECN
	 * enabled IP packets.  Setting it to 1 avoids compatibility
	 * problems.
	 */
	if (tcps->tcps_ecn_permitted == 2)
		tcp->tcp_ecn_ok = B_TRUE;
	/* Trace change from BOUND -> SYN_SENT here */
	DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
	    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
	    int32_t, TCPS_BOUND);
	TCP_TIMER_RESTART(tcp, tcp->tcp_rto);
	syn_mp = tcp_xmit_mp(tcp, NULL, 0, NULL, NULL,
	    tcp->tcp_iss, B_FALSE, NULL, B_FALSE);
	if (syn_mp != NULL) {
		/*
		 * We must bump the generation before sending the syn
		 * to ensure that we use the right generation in case
		 * this thread issues a "connected" up call.
		 */
		SOCK_CONNID_BUMP(tcp->tcp_connid);
		/*
		 * DTrace sending the first SYN as a
		 * tcp:::connect-request event.
		 */
		DTRACE_TCP5(connect__request, mblk_t *, NULL,
		    ip_xmit_attr_t *, connp->conn_ixa,
		    void_ip_t *, syn_mp->b_rptr, tcp_t *, tcp,
		    tcph_t *,
		    &syn_mp->b_rptr[connp->conn_ixa->ixa_ip_hdr_length]);
		tcp_send_data(tcp, syn_mp);
	}

	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
	return (0);
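
	/*
	 * For illustration, the connect-request probe above can be watched
	 * from userland with the DTrace tcp provider, e.g.:
	 *
	 *	dtrace -n 'tcp:::connect-request
	 *	    { printf("%s:%u", args[3]->tcps_raddr,
	 *	    args[3]->tcps_rport); }'
	 *
	 * where args[3] is the tcpsinfo_t translated from the tcp_t
	 * passed here.
	 */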
connect_failed:
	connp->conn_faddr_v6 = ipv6_all_zeros;
	connp->conn_fport = 0;
	tcp->tcp_state = oldstate;
	if (tcp->tcp_conn.tcp_opts_conn_req != NULL)
		tcp_close_mpp(&tcp->tcp_conn.tcp_opts_conn_req);
	return (error);
}
int
tcp_do_listen(conn_t *connp, struct sockaddr *sa, socklen_t len,
    int backlog, cred_t *cr, boolean_t bind_to_req_port_only)
{
	tcp_t		*tcp = connp->conn_tcp;
	int		error = 0;
	tcp_stack_t	*tcps = tcp->tcp_tcps;
	int32_t		oldstate;
	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	if (tcp->tcp_state >= TCPS_BOUND) {
		if ((tcp->tcp_state == TCPS_BOUND ||
		    tcp->tcp_state == TCPS_LISTEN) && backlog > 0) {
			/*
			 * Handle listen() increasing backlog.
			 * This is more "liberal" than what the TPI spec
			 * requires but is needed to avoid a t_unbind
			 * when handling listen() since the port number
			 * might be "stolen" between the unbind and bind.
			 */
			goto do_listen;
		}
		if (connp->conn_debug) {
			(void) strlog(TCP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
			    "tcp_listen: bad state, %d", tcp->tcp_state);
		}
		return (-TOUTSTATE);
	}
	if (sa == NULL) {
		sin6_t	addr;
		sin_t	*sin;
		sin6_t	*sin6;

		ASSERT(IPCL_IS_NONSTR(connp));
		/* Do an implicit bind: Request for a generic port. */
		if (connp->conn_family == AF_INET) {
			len = sizeof (sin_t);
			sin = (sin_t *)&addr;
			*sin = sin_null;
			sin->sin_family = AF_INET;
		} else {
			ASSERT(connp->conn_family == AF_INET6);
			len = sizeof (sin6_t);
			sin6 = (sin6_t *)&addr;
			*sin6 = sin6_null;
			sin6->sin6_family = AF_INET6;
		}
		sa = (struct sockaddr *)&addr;
	}

	error = tcp_bind_check(connp, sa, len, cr,
	    bind_to_req_port_only);
	if (error != 0)
		return (error);
	/* Fall through and do the fanout insertion */
do_listen:
	ASSERT(tcp->tcp_state == TCPS_BOUND || tcp->tcp_state == TCPS_LISTEN);
	tcp->tcp_conn_req_max = backlog;
	if (tcp->tcp_conn_req_max) {
		if (tcp->tcp_conn_req_max < tcps->tcps_conn_req_min)
			tcp->tcp_conn_req_max = tcps->tcps_conn_req_min;
		if (tcp->tcp_conn_req_max > tcps->tcps_conn_req_max_q)
			tcp->tcp_conn_req_max = tcps->tcps_conn_req_max_q;

		/*
		 * If this is a listener, do not reset the eager list
		 * and other state.  Note that we don't check if the
		 * existing eager list meets the new tcp_conn_req_max
		 * requirement.
		 */
		if (tcp->tcp_state != TCPS_LISTEN) {
			tcp->tcp_state = TCPS_LISTEN;
			DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
			    connp->conn_ixa, void, NULL, tcp_t *, tcp,
			    void, NULL, int32_t, TCPS_BOUND);
			/* Initialize the chain. Don't need the eager_lock */
			tcp->tcp_eager_next_q0 = tcp->tcp_eager_prev_q0 = tcp;
			tcp->tcp_eager_next_drop_q0 = tcp;
			tcp->tcp_eager_prev_drop_q0 = tcp;
			tcp->tcp_second_ctimer_threshold =
			    tcps->tcps_ip_abort_linterval;
		}
	}
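
	/*
	 * Example of the clamping above (the stack values are tunables;
	 * numbers here are only illustrative): with tcps_conn_req_min = 1
	 * and tcps_conn_req_max_q = 128, listen(fd, 1024) ends up with
	 * tcp_conn_req_max = 128, while listen(fd, 0) leaves the backlog
	 * at 0 and skips the clamping entirely.
	 */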
	/*
	 * We need to make sure that the conn_recv is set to a non-null
	 * value before we insert the conn into the classifier table.
	 * This is to avoid a race with an incoming packet which does an
	 * ipcl_classify().
	 * We initially set it to tcp_input_listener_unbound to try to
	 * pick a good squeue for the listener when the first SYN arrives.
	 * tcp_input_listener_unbound sets it to tcp_input_listener on that
	 * first SYN.
	 */
	connp->conn_recv = tcp_input_listener_unbound;

	/* Insert the listener in the classifier table */
	error = ip_laddr_fanout_insert(connp);
	if (error != 0) {
		/* Undo the bind - release the port number */
		oldstate = tcp->tcp_state;
		tcp->tcp_state = TCPS_IDLE;
		DTRACE_TCP6(state__change, void, NULL, ip_xmit_attr_t *,
		    connp->conn_ixa, void, NULL, tcp_t *, tcp, void, NULL,
		    int32_t, oldstate);
		connp->conn_bound_addr_v6 = ipv6_all_zeros;

		connp->conn_laddr_v6 = ipv6_all_zeros;
		connp->conn_saddr_v6 = ipv6_all_zeros;
		connp->conn_ports = 0;

		tcp_bind_hash_remove(tcp);
		return (error);
	}
	/*
	 * If there is a connection limit, allocate and initialize
	 * the counter struct.  Note that since listen can be called
	 * multiple times, the struct may have already been allocated.
	 */
	if (!list_is_empty(&tcps->tcps_listener_conf) &&
	    tcp->tcp_listen_cnt == NULL) {
		tcp_listen_cnt_t	*tlc;
		uint32_t		ratio;

		ratio = tcp_find_listener_conf(tcps,
		    ntohs(connp->conn_lport));
		if (ratio != 0) {
			uint32_t mem_ratio, tot_buf;

			tlc = kmem_alloc(sizeof (tcp_listen_cnt_t),
			    KM_SLEEP);
			/*
			 * Calculate the connection limit based on
			 * the configured ratio and maxusers.  Maxusers
			 * are calculated based on memory size,
			 * ~ 1 user per MB.  Note that the conn_rcvbuf
			 * and conn_sndbuf may change after a
			 * connection is accepted.  So what we have
			 * is only an approximation.
			 */
			if ((tot_buf = connp->conn_rcvbuf +
			    connp->conn_sndbuf) < MB) {
				mem_ratio = MB / tot_buf;
				tlc->tlc_max = maxusers / ratio *
				    mem_ratio;
			} else {
				mem_ratio = tot_buf / MB;
				tlc->tlc_max = maxusers / ratio /
				    mem_ratio;
			}
			/* At least we should allow two connections! */
			if (tlc->tlc_max <= tcp_min_conn_listener)
				tlc->tlc_max = tcp_min_conn_listener;
			tlc->tlc_cnt = 1;
			tlc->tlc_drop = 0;
			tcp->tcp_listen_cnt = tlc;
		}
	}
	return (error);
}
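
/*
 * Worked example of the tlc_max computation above (numbers hypothetical):
 * with maxusers = 2048 (~2 GB of memory at ~1 user per MB), a configured
 * ratio of 2 and conn_rcvbuf + conn_sndbuf = 128 KB:
 *
 *	tot_buf = 131072 < MB, so mem_ratio = MB / tot_buf = 8
 *	tlc_max = maxusers / ratio * mem_ratio = 2048 / 2 * 8 = 8192
 *
 * Smaller per-connection buffers thus allow proportionally more
 * connections for the same configured ratio.
 */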