/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_cc_functions.h>
#define NUMBER_OF_MTU_SIZES 18

#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif
void
sctp_sblog(struct sockbuf *sb,
    struct sctp_tcb *stcb, int from, int incr)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sb.stcb = stcb;
    sctp_clog.x.sb.so_sbcc = sb->sb_cc;
    if (stcb)
        sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
    else
        sctp_clog.x.sb.stcb_sbcc = 0;
    sctp_clog.x.sb.incr = incr;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SB,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
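
/*
 * Note on the logging helpers in this file: each one fills the
 * event-specific member of the union inside struct sctp_cwnd_log and then
 * emits the record through SCTP_CTR6() as four 32-bit words read back via
 * the overlapping x.misc.log1..log4 view, so every event type shares one
 * fixed-size KTR trace format.
 */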
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.close.inp = (void *)inp;
    sctp_clog.x.close.sctp_flags = inp->sctp_flags;
    if (stcb) {
        sctp_clog.x.close.stcb = (void *)stcb;
        sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
    } else {
        sctp_clog.x.close.stcb = 0;
        sctp_clog.x.close.state = 0;
    }
    sctp_clog.x.close.loc = loc;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CLOSE,
        0,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
rto_logging(struct sctp_nets *net, int from)
{
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.rto.net = (void *)net;
    sctp_clog.x.rto.rtt = net->prev_rtt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RTT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq,
    uint16_t stream, int from)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.strlog.stcb = stcb;
    sctp_clog.x.strlog.n_tsn = tsn;
    sctp_clog.x.strlog.n_sseq = sseq;
    sctp_clog.x.strlog.e_tsn = 0;
    sctp_clog.x.strlog.e_sseq = 0;
    sctp_clog.x.strlog.strm = stream;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.nagle.stcb = (void *)stcb;
    sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
    sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
    sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
    sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_NAGLE,
        action,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn,
    uint16_t gaps, uint16_t dups, int from)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.sack.cumack = cumack;
    sctp_clog.x.sack.oldcumack = old_cumack;
    sctp_clog.x.sack.tsn = tsn;
    sctp_clog.x.sack.numGaps = gaps;
    sctp_clog.x.sack.numDups = dups;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_SACK,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.map.base = map;
    sctp_clog.x.map.cum = cum;
    sctp_clog.x.map.high = high;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAP,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
    int from)
{
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.fr.largest_tsn = biggest_tsn;
    sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
    sctp_clog.x.fr.tsn = tsn;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_FR,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_mb(struct mbuf *m, int from)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mb.mp = m;
    sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
    sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
    sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
    if (SCTP_BUF_IS_EXTENDED(m)) {
        sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
        sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
    } else {
        sctp_clog.x.mb.ext = 0;
        sctp_clog.x.mb.refcnt = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBUF,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
    int from)
{
    struct sctp_cwnd_log sctp_clog;

    if (control == NULL) {
        SCTP_PRINTF("Gak log of NULL?\n");
        return;
    }
    sctp_clog.x.strlog.stcb = control->stcb;
    sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
    sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
    sctp_clog.x.strlog.strm = control->sinfo_stream;
    if (poschk != NULL) {
        sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
        sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
    } else {
        sctp_clog.x.strlog.e_tsn = 0;
        sctp_clog.x.strlog.e_sseq = 0;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_STRM,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.cwnd.net = net;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

    if (net) {
        sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
        sctp_clog.x.cwnd.inflight = net->flight_size;
        sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
        sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
        sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
    }
    if (SCTP_CWNDLOG_PRESEND == from) {
        sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
    }
    sctp_clog.x.cwnd.cwnd_augment = augment;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_CWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    if (inp) {
        sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
    } else {
        sctp_clog.x.lock.sock = (void *)NULL;
    }
    sctp_clog.x.lock.inp = (void *)inp;
    if (stcb) {
        sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
    } else {
        sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
    }
    if (inp) {
        sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
        sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
    } else {
        sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
    }
    sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
    if (inp->sctp_socket) {
        sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
        sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
    } else {
        sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
        sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_LOCK_EVENT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
    struct sctp_cwnd_log sctp_clog;

    memset(&sctp_clog, 0, sizeof(sctp_clog));
    sctp_clog.x.cwnd.net = net;
    sctp_clog.x.cwnd.cwnd_new_value = error;
    sctp_clog.x.cwnd.inflight = net->flight_size;
    sctp_clog.x.cwnd.cwnd_augment = burst;
    if (stcb->asoc.send_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_send = 255;
    else
        sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
    if (stcb->asoc.stream_queue_cnt > 255)
        sctp_clog.x.cwnd.cnt_in_str = 255;
    else
        sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MAXBURST,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = snd_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = 0;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.rwnd.rwnd = peers_rwnd;
    sctp_clog.x.rwnd.send_size = flight_size;
    sctp_clog.x.rwnd.overhead = overhead;
    sctp_clog.x.rwnd.new_rwnd = a_rwndval;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_RWND,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.mbcnt.total_queue_size = total_oq;
    sctp_clog.x.mbcnt.size_change = book;
    sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
    sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_MBCNT,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_MISC_EVENT,
        from,
        a, b, c, d);
}
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.wake.stcb = (void *)stcb;
    sctp_clog.x.wake.wake_cnt = wake_cnt;
    sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
    sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
    sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

    if (stcb->asoc.stream_queue_cnt < 0xff)
        sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
    else
        sctp_clog.x.wake.stream_qcnt = 0xff;

    if (stcb->asoc.chunks_on_out_queue < 0xff)
        sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
    else
        sctp_clog.x.wake.chunks_on_oque = 0xff;

    sctp_clog.x.wake.sctpflags = 0;
    /* set in the deferred mode stuff */
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
        sctp_clog.x.wake.sctpflags |= 1;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
        sctp_clog.x.wake.sctpflags |= 2;
    if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
        sctp_clog.x.wake.sctpflags |= 4;
    /* what about the sb */
    if (stcb->sctp_socket) {
        struct socket *so = stcb->sctp_socket;

        sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
    } else {
        sctp_clog.x.wake.sbflags = 0xff;
    }
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_WAKE,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
void
sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
{
    struct sctp_cwnd_log sctp_clog;

    sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
    sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
    sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
    sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
    sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
    sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
    sctp_clog.x.blk.sndlen = sendlen;
    SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
        SCTP_LOG_EVENT_BLOCK,
        from,
        sctp_clog.x.misc.log1,
        sctp_clog.x.misc.log2,
        sctp_clog.x.misc.log3,
        sctp_clog.x.misc.log4);
}
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
    /* May need to fix this if ktrdump does not work */
    return (0);
}
#ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;
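
/*
 * The audit trail is a fixed-size ring: each entry is a two-byte
 * (tag, detail) pair and sctp_audit_indx wraps back to zero once
 * SCTP_AUDIT_SIZE entries have been written, so only the most recent
 * events are kept.
 */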
static void
sctp_print_audit_report(void)
{
    int i;
    int cnt;

    cnt = 0;
    for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
            (uint32_t) sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    for (i = 0; i < sctp_audit_indx; i++) {
        if ((sctp_audit_data[i][0] == 0xe0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if (sctp_audit_data[i][0] == 0xf0) {
            cnt = 0;
            SCTP_PRINTF("\n");
        } else if ((sctp_audit_data[i][0] == 0xc0) &&
            (sctp_audit_data[i][1] == 0x01)) {
            SCTP_PRINTF("\n");
            cnt = 0;
        }
        SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
            (uint32_t) sctp_audit_data[i][1]);
        cnt++;
        if ((cnt % 14) == 0)
            SCTP_PRINTF("\n");
    }
    SCTP_PRINTF("\n");
}
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int resend_cnt, tot_out, rep, tot_book_cnt;
    struct sctp_nets *lnet;
    struct sctp_tmit_chunk *chk;

    sctp_audit_data[sctp_audit_indx][0] = 0xAA;
    sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    if (inp == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x01;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    if (stcb == NULL) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0x02;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        return;
    }
    sctp_audit_data[sctp_audit_indx][0] = 0xA1;
    sctp_audit_data[sctp_audit_indx][1] =
        (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
    rep = 0;
    tot_book_cnt = 0;
    resend_cnt = tot_out = 0;
    TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
        if (chk->sent == SCTP_DATAGRAM_RESEND) {
            resend_cnt++;
        } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
            tot_out += chk->book_size;
            tot_book_cnt++;
        }
    }
    if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA1;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
            resend_cnt, stcb->asoc.sent_queue_retran_cnt);
        stcb->asoc.sent_queue_retran_cnt = resend_cnt;
        sctp_audit_data[sctp_audit_indx][0] = 0xA2;
        sctp_audit_data[sctp_audit_indx][1] =
            (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA2;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
            (int)stcb->asoc.total_flight);
        stcb->asoc.total_flight = tot_out;
    }
    if (tot_book_cnt != stcb->asoc.total_flight_count) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA5;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

        stcb->asoc.total_flight_count = tot_book_cnt;
    }
    tot_out = 0;
    TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
        tot_out += lnet->flight_size;
    }
    if (tot_out != stcb->asoc.total_flight) {
        sctp_audit_data[sctp_audit_indx][0] = 0xAF;
        sctp_audit_data[sctp_audit_indx][1] = 0xA3;
        sctp_audit_indx++;
        if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
            sctp_audit_indx = 0;
        }
        rep = 1;
        SCTP_PRINTF("real flight:%d net total was %d\n",
            stcb->asoc.total_flight, tot_out);
        /* now corrective action */
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            tot_out = 0;
            TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
                if ((chk->whoTo == lnet) &&
                    (chk->sent < SCTP_DATAGRAM_RESEND)) {
                    tot_out += chk->book_size;
                }
            }
            if (lnet->flight_size != tot_out) {
                SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
                    (uint32_t) lnet, lnet->flight_size,
                    tot_out);
                lnet->flight_size = tot_out;
            }
        }
    }
    if (rep) {
        sctp_print_audit_report();
    }
}
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

    sctp_audit_data[sctp_audit_indx][0] = ev;
    sctp_audit_data[sctp_audit_indx][1] = fd;
    sctp_audit_indx++;
    if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
        sctp_audit_indx = 0;
    }
}

#endif
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned.
 */
static int sctp_mtu_sizes[] = {
    68,
    296,
    508,
    512,
    544,
    576,
    1006,
    1492,
    1500,
    1536,
    2002,
    2048,
    4352,
    4464,
    8166,
    17914,
    32000,
    65535
};
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
    struct sctp_association *asoc;
    struct sctp_nets *net;

    asoc = &stcb->asoc;

    (void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
    (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
    (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
    (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
    (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
    (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        (void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
        (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
    }
}
int
find_next_best_mtu(int totsz)
{
    int i, perfer;

    /*
     * if we are in here we must find the next best fit based on the
     * size of the dg that failed to be sent.
     */
    perfer = 0;
    for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
        if (totsz < sctp_mtu_sizes[i]) {
            perfer = i - 1;
            if (perfer < 0)
                perfer = 0;
            break;
        }
    }
    return (sctp_mtu_sizes[perfer]);
}
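
/*
 * Worked example (illustrative, assuming the table values above): for a
 * datagram of totsz = 1400 that failed to send, the scan stops at the
 * 1492 entry and the function returns the previous size, 1006, i.e. the
 * largest table MTU strictly below the failing size.
 */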
static void
sctp_fill_random_store(struct sctp_pcb *m)
{
    /*
     * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
     * our counter. The result becomes our good random numbers and we
     * then setup to give these out. Note that we do no locking to
     * protect this. This is ok, since if competing folks call this we
     * will get more gobbled gook in the random store, which is what we
     * want. There is a danger that two guys will use the same random
     * numbers, but that's ok too since that is random as well :->
     */
    m->store_at = 0;
    (void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
        sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
        sizeof(m->random_counter), (uint8_t *) m->random_store);
    m->random_counter++;
}
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
    /*
     * A true implementation should use random selection process to get
     * the initial stream sequence number, using RFC1750 as a good
     * guideline
     */
    uint32_t x, *xp;
    uint8_t *p;
    int store_at, new_store;

    if (inp->initial_sequence_debug != 0) {
        uint32_t ret;

        ret = inp->initial_sequence_debug;
        inp->initial_sequence_debug++;
        return (ret);
    }
retry:
    store_at = inp->store_at;
    new_store = store_at + sizeof(uint32_t);
    if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
        new_store = 0;
    }
    if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
        goto retry;
    }
    if (new_store == 0) {
        /* Refill the random store */
        sctp_fill_random_store(inp);
    }
    p = &inp->random_store[store_at];
    xp = (uint32_t *) p;
    x = *xp;
    return (x);
}
uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
{
    uint32_t x, not_done;
    struct timeval now;

    (void)SCTP_GETTIME_TIMEVAL(&now);
    not_done = 1;
    while (not_done) {
        x = sctp_select_initial_TSN(&inp->sctp_ep);
        if (x == 0) {
            /* we never use 0 */
            continue;
        }
        if (sctp_is_vtag_good(inp, x, &now, save_in_twait)) {
            not_done = 0;
        }
    }
    return (x);
}
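
/*
 * Tag selection note: zero is never handed out (it is reserved), and
 * candidates are drawn from the same random store until
 * sctp_is_vtag_good() accepts one; the save_in_twait flag appears to ask
 * that the accepted tag also be recorded in the time-wait table.
 */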
int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
    int for_a_init, uint32_t override_tag, uint32_t vrf_id)
{
    struct sctp_association *asoc;
    int i;

    asoc = &stcb->asoc;
    /*
     * Anything set to zero is taken care of by the allocation routine's
     * bzero
     */

    /*
     * Up front select what scoping to apply on addresses I tell my peer
     * Not sure what to do with these right now, we will need to come up
     * with a way to set them. We may need to pass them through from the
     * caller in the sctp_aloc_assoc() function.
     */

    /* init all variables to a known value. */
    SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
    asoc->max_burst = m->sctp_ep.max_burst;
    asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
    asoc->cookie_life = m->sctp_ep.def_cookie_life;
    asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
    /* JRS 5/21/07 - Init CMT PF variables */
    asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
    asoc->sctp_frag_point = m->sctp_frag_point;
#ifdef INET
    asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
#else
    asoc->default_tos = 0;
#endif

#ifdef INET6
    asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
#else
    asoc->default_flowlabel = 0;
#endif
    asoc->sb_send_resv = 0;
    if (override_tag) {
        struct timeval now;

        (void)SCTP_GETTIME_TIMEVAL(&now);
        if (sctp_is_in_timewait(override_tag)) {
            /*
             * It must be in the time-wait hash, we put it there
             * when we aloc one. If not the peer is playing
             * games.
             */
            asoc->my_vtag = override_tag;
        } else {
            SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
#ifdef INVARIANTS
            panic("Huh is_in_timewait fails");
#endif
            return (ENOMEM);
        }
    } else {
        asoc->my_vtag = sctp_select_a_tag(m, 1);
    }
    /* Get the nonce tags */
    asoc->my_vtag_nonce = sctp_select_a_tag(m, 0);
    asoc->peer_vtag_nonce = sctp_select_a_tag(m, 0);
    asoc->vrf_id = vrf_id;

    if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
        asoc->hb_is_disabled = 1;
    else
        asoc->hb_is_disabled = 0;
#ifdef SCTP_ASOCLOG_OF_TSNS
    asoc->tsn_in_at = 0;
    asoc->tsn_out_at = 0;
    asoc->tsn_in_wrapped = 0;
    asoc->tsn_out_wrapped = 0;
    asoc->cumack_log_at = 0;
    asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
    asoc->fs_index = 0;
#endif
    asoc->assoc_up_sent = 0;
    asoc->assoc_id = asoc->my_vtag;
    asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
        sctp_select_initial_TSN(&m->sctp_ep);
    asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
    /* we are optimistic here */
    asoc->peer_supports_pktdrop = 1;

    asoc->sent_queue_retran_cnt = 0;

    /* for CMT */
    asoc->last_net_data_came_from = NULL;

    /* This will need to be adjusted */
    asoc->last_cwr_tsn = asoc->init_seq_number - 1;
    asoc->last_acked_seq = asoc->init_seq_number - 1;
    asoc->advanced_peer_ack_point = asoc->last_acked_seq;
    asoc->asconf_seq_in = asoc->last_acked_seq;

    /* here we are different, we hold the next one we expect */
    asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

    asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
    asoc->initial_rto = m->sctp_ep.initial_rto;

    asoc->max_init_times = m->sctp_ep.max_init_times;
    asoc->max_send_times = m->sctp_ep.max_send_times;
    asoc->def_net_failure = m->sctp_ep.def_net_failure;
    asoc->free_chunk_cnt = 0;

    asoc->iam_blocking = 0;
    /* ECN Nonce initialization */
    asoc->context = m->sctp_context;
    asoc->def_send = m->def_send;
    asoc->ecn_nonce_allowed = 0;
    asoc->receiver_nonce_sum = 1;
    asoc->nonce_sum_expect_base = 1;
    asoc->nonce_sum_check = 1;
    asoc->nonce_resync_tsn = 0;
    asoc->nonce_wait_for_ecne = 0;
    asoc->nonce_wait_tsn = 0;
    asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
    asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
    asoc->pr_sctp_cnt = 0;
    asoc->total_output_queue_size = 0;

    if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
        struct in6pcb *inp6;

        /* Its a V6 socket */
        inp6 = (struct in6pcb *)m;
        asoc->ipv6_addr_legal = 1;
        /* Now look at the binding flag to see if V4 will be legal */
        if (SCTP_IPV6_V6ONLY(inp6) == 0) {
            asoc->ipv4_addr_legal = 1;
        } else {
            /* V4 addresses are NOT legal on the association */
            asoc->ipv4_addr_legal = 0;
        }
    } else {
        /* Its a V4 socket, no - V6 */
        asoc->ipv4_addr_legal = 1;
        asoc->ipv6_addr_legal = 0;
    }

    asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
    asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);

    asoc->smallest_mtu = m->sctp_frag_point;
#ifdef SCTP_PRINT_FOR_B_AND_M
    SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
        asoc->smallest_mtu);
#endif
    asoc->minrto = m->sctp_ep.sctp_minrto;
    asoc->maxrto = m->sctp_ep.sctp_maxrto;

    asoc->locked_on_sending = NULL;
    asoc->stream_locked_on = 0;
    asoc->ecn_echo_cnt_onq = 0;
    asoc->stream_locked = 0;

    asoc->send_sack = 1;

    LIST_INIT(&asoc->sctp_restricted_addrs);

    TAILQ_INIT(&asoc->nets);
    TAILQ_INIT(&asoc->pending_reply_queue);
    TAILQ_INIT(&asoc->asconf_ack_sent);
    /* Setup to fill the hb random cache at first HB */
    asoc->hb_random_idx = 4;

    asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
    /*
     * JRS - Pick the default congestion control module based on the
     * sysctl.
     */
    switch (m->sctp_ep.sctp_default_cc_module) {
        /* JRS - Standard TCP congestion control */
    case SCTP_CC_RFC2581:
        {
            stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
            stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
            break;
        }
        /* JRS - High Speed TCP congestion control (Floyd) */
    case SCTP_CC_HSTCP:
        {
            stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
            stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
            break;
        }
        /* JRS - HTCP congestion control */
    case SCTP_CC_HTCP:
        {
            stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
            stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
            break;
        }
        /* JRS - By default, use RFC2581 */
    default:
        {
            stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
            stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
            stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
            break;
        }
    }
    /*
     * Now the stream parameters, here we allocate space for all streams
     * that we request by default.
     */
    asoc->streamoutcnt = asoc->pre_open_streams =
        m->sctp_ep.pre_open_stream_count;
    SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
        asoc->streamoutcnt * sizeof(struct sctp_stream_out),
        SCTP_M_STRMO);
    if (asoc->strmout == NULL) {
        /* big trouble no memory */
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    for (i = 0; i < asoc->streamoutcnt; i++) {
        /*
         * inbound side must be set to 0xffff, also NOTE when we get
         * the INIT-ACK back (for INIT sender) we MUST reduce the
         * count (streamoutcnt) but first check if we sent to any of
         * the upper streams that were dropped (if some were). Those
         * that were dropped must be notified to the upper layer as
         * failed to send.
         */
        asoc->strmout[i].next_sequence_sent = 0x0;
        TAILQ_INIT(&asoc->strmout[i].outqueue);
        asoc->strmout[i].stream_no = i;
        asoc->strmout[i].last_msg_incomplete = 0;
        asoc->strmout[i].next_spoke.tqe_next = 0;
        asoc->strmout[i].next_spoke.tqe_prev = 0;
    }
    /* Now the mapping array */
    asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
    SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
        SCTP_M_MAP);
    if (asoc->mapping_array == NULL) {
        SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
        return (ENOMEM);
    }
    memset(asoc->mapping_array, 0, asoc->mapping_array_size);
    /* Now the init of the other outqueues */
    TAILQ_INIT(&asoc->free_chunks);
    TAILQ_INIT(&asoc->out_wheel);
    TAILQ_INIT(&asoc->control_send_queue);
    TAILQ_INIT(&asoc->asconf_send_queue);
    TAILQ_INIT(&asoc->send_queue);
    TAILQ_INIT(&asoc->sent_queue);
    TAILQ_INIT(&asoc->reasmqueue);
    TAILQ_INIT(&asoc->resetHead);
    asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
    TAILQ_INIT(&asoc->asconf_queue);
    /* authentication fields */
    asoc->authinfo.random = NULL;
    asoc->authinfo.assoc_key = NULL;
    asoc->authinfo.assoc_keyid = 0;
    asoc->authinfo.recv_key = NULL;
    asoc->authinfo.recv_keyid = 0;
    LIST_INIT(&asoc->shared_keys);
    asoc->marked_retrans = 0;
    asoc->timoshutdown = 0;
    asoc->timoheartbeat = 0;
    asoc->timocookie = 0;
    asoc->timoshutdownack = 0;
    (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
    asoc->discontinuity_time = asoc->start_time;
    /*
     * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
     * freed later when the association is freed.
     */
    return (0);
}
int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
    /* mapping array needs to grow */
    uint8_t *new_array;
    uint32_t new_size;

    new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
    SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
    if (new_array == NULL) {
        /* can't get more, forget it */
        SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
            new_size);
        return (-1);
    }
    memset(new_array, 0, new_size);
    memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
    SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
    asoc->mapping_array = new_array;
    asoc->mapping_array_size = new_size;
    return (0);
}
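
/*
 * Sizing sketch (illustrative): a request for `needed' more TSNs grows
 * the array by (needed + 7) / 8 bytes, one bit per TSN rounded up to a
 * whole byte, plus SCTP_MAPPING_ARRAY_INCR bytes of slack; e.g. with
 * needed = 16 the array grows by 2 + SCTP_MAPPING_ARRAY_INCR bytes.
 */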
#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
static void
sctp_iterator_work(struct sctp_iterator *it)
{
    int iteration_count = 0;
    int inp_skip = 0;

    SCTP_ITERATOR_LOCK();
    if (it->inp) {
        SCTP_INP_DECR_REF(it->inp);
    }
    if (it->inp == NULL) {
        /* iterator is complete */
done_with_iterator:
        SCTP_ITERATOR_UNLOCK();
        if (it->function_atend != NULL) {
            (*it->function_atend) (it->pointer, it->val);
        }
        SCTP_FREE(it, SCTP_M_ITER);
        return;
    }
select_a_new_ep:
    SCTP_INP_WLOCK(it->inp);
    while (((it->pcb_flags) &&
        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
        ((it->pcb_features) &&
        ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
        /* endpoint flags or features don't match, so keep looking */
        if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
            SCTP_INP_WUNLOCK(it->inp);
            goto done_with_iterator;
        }
        SCTP_INP_WUNLOCK(it->inp);
        it->inp = LIST_NEXT(it->inp, sctp_list);
        if (it->inp == NULL) {
            goto done_with_iterator;
        }
        SCTP_INP_WLOCK(it->inp);
    }

    SCTP_INP_WUNLOCK(it->inp);
    SCTP_INP_RLOCK(it->inp);

    /* now go through each assoc which is in the desired state */
    if (it->done_current_ep == 0) {
        if (it->function_inp != NULL)
            inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
        it->done_current_ep = 1;
    }
    if (it->stcb == NULL) {
        /* run the per instance function */
        it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
    }
    if ((inp_skip) || it->stcb == NULL) {
        if (it->function_inp_end != NULL) {
            inp_skip = (*it->function_inp_end) (it->inp,
                it->pointer,
                it->val);
        }
        SCTP_INP_RUNLOCK(it->inp);
        goto no_stcb;
    }
    while (it->stcb) {
        SCTP_TCB_LOCK(it->stcb);
        if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
            /* not in the right state... keep looking */
            SCTP_TCB_UNLOCK(it->stcb);
            goto next_assoc;
        }
        /* see if we have limited out the iterator loop */
        iteration_count++;
        if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
            /* Pause to let others grab the lock */
            atomic_add_int(&it->stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(it->stcb);

            SCTP_INP_INCR_REF(it->inp);
            SCTP_INP_RUNLOCK(it->inp);
            SCTP_ITERATOR_UNLOCK();
            SCTP_ITERATOR_LOCK();
            SCTP_INP_RLOCK(it->inp);

            SCTP_INP_DECR_REF(it->inp);
            SCTP_TCB_LOCK(it->stcb);
            atomic_add_int(&it->stcb->asoc.refcnt, -1);
            iteration_count = 0;
        }
        /* run function on this one */
        (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

        /*
         * we lie here, it really needs to have its own type but
         * first I must verify that this won't affect things :-0
         */
        if (it->no_chunk_output == 0)
            sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

        SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
        it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
        if (it->stcb == NULL) {
            /* Run last function */
            if (it->function_inp_end != NULL) {
                inp_skip = (*it->function_inp_end) (it->inp,
                    it->pointer,
                    it->val);
            }
        }
    }
    SCTP_INP_RUNLOCK(it->inp);
no_stcb:
    /* done with all assocs on this endpoint, move on to next endpoint */
    it->done_current_ep = 0;
    SCTP_INP_WLOCK(it->inp);
    SCTP_INP_WUNLOCK(it->inp);
    if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
        it->inp = NULL;
    } else {
        SCTP_INP_INFO_RLOCK();
        it->inp = LIST_NEXT(it->inp, sctp_list);
        SCTP_INP_INFO_RUNLOCK();
    }
    if (it->inp == NULL) {
        goto done_with_iterator;
    }
    goto select_a_new_ep;
}
void
sctp_iterator_worker(void)
{
    struct sctp_iterator *it = NULL;

    /* This function is called with the WQ lock in place */

    SCTP_BASE_INFO(iterator_running) = 1;
again:
    it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
    while (it) {
        /* now lets work on this one */
        TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
        SCTP_IPI_ITERATOR_WQ_UNLOCK();
        sctp_iterator_work(it);
        SCTP_IPI_ITERATOR_WQ_LOCK();
        /* sa_ignore FREED_MEMORY */
        it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
    }
    if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
        goto again;
    }
    SCTP_BASE_INFO(iterator_running) = 0;
    return;
}

#endif
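
/*
 * Note: the worker runs with the iterator work-queue lock held; the lock
 * is dropped around each sctp_iterator_work() call so new iterators can
 * be queued while one is being serviced, and the queue head is
 * re-checked after the lock is retaken.
 */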
void
sctp_handle_addr_wq(void)
{
    /* deal with the ADDR wq from the rtsock calls */
    struct sctp_laddr *wi;
    struct sctp_asconf_iterator *asc;

    SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
        sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
    if (asc == NULL) {
        /* Try later, no memory */
        sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
            (struct sctp_inpcb *)NULL,
            (struct sctp_tcb *)NULL,
            (struct sctp_nets *)NULL);
        return;
    }
    LIST_INIT(&asc->list_of_work);
    asc->cnt = 0;
    SCTP_IPI_ITERATOR_WQ_LOCK();
    wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
    while (wi != NULL) {
        LIST_REMOVE(wi, sctp_nxt_addr);
        LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
        asc->cnt++;
        wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
    }
    SCTP_IPI_ITERATOR_WQ_UNLOCK();
    if (asc->cnt == 0) {
        SCTP_FREE(asc, SCTP_M_ASC_IT);
    } else {
        (void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
            sctp_asconf_iterator_stcb,
            NULL,	/* No ep end for boundall */
            SCTP_PCB_FLAGS_BOUNDALL,
            SCTP_PCB_ANY_FEATURES,
            SCTP_ASOC_ANY_STATE,
            (void *)asc, 0,
            sctp_asconf_iterator_end, NULL, 0);
    }
}
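
/*
 * The pattern here: the rtsock-driven address list is drained onto a
 * private work list under the iterator WQ lock, then handed to
 * sctp_initiate_iterator() so every bound-all endpoint sees the address
 * change asynchronously.
 */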
void
sctp_timeout_handler(void *t)
{
    struct sctp_inpcb *inp;
    struct sctp_tcb *stcb;
    struct sctp_nets *net;
    struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    struct socket *so;

#endif
    int did_output, type;
    int cur_oerr, retcode;
    struct sctp_iterator *it = NULL;

    tmr = (struct sctp_timer *)t;
    inp = (struct sctp_inpcb *)tmr->ep;
    stcb = (struct sctp_tcb *)tmr->tcb;
    net = (struct sctp_nets *)tmr->net;
    did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
    sctp_audit_log(0xF0, (uint8_t) tmr->type);
    sctp_auditing(3, inp, stcb, net);
#endif

    /* sanity checks... */
    if (tmr->self != (void *)tmr) {
        /*
         * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
         * tmr);
         */
        return;
    }
    tmr->stopped_from = 0xa001;
    if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
        /*
         * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
         * tmr->type);
         */
        return;
    }
    tmr->stopped_from = 0xa002;
    if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
        return;
    }
    /* if this is an iterator timeout, get the struct and clear inp */
    tmr->stopped_from = 0xa003;
    if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
        it = (struct sctp_iterator *)inp;
        inp = NULL;
    }
    if (inp) {
        SCTP_INP_INCR_REF(inp);
        if ((inp->sctp_socket == 0) &&
            ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
            (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
            (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
            (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
            (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))) {
            SCTP_INP_DECR_REF(inp);
            return;
        }
    }
    tmr->stopped_from = 0xa004;
    if (stcb) {
        atomic_add_int(&stcb->asoc.refcnt, 1);
        if (stcb->asoc.state == 0) {
            atomic_add_int(&stcb->asoc.refcnt, -1);
            if (inp) {
                SCTP_INP_DECR_REF(inp);
            }
            return;
        }
    }
    tmr->stopped_from = 0xa005;
    SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        if (inp) {
            SCTP_INP_DECR_REF(inp);
        }
        if (stcb) {
            atomic_add_int(&stcb->asoc.refcnt, -1);
        }
        return;
    }
    tmr->stopped_from = 0xa006;

    if (stcb) {
        SCTP_TCB_LOCK(stcb);
        atomic_add_int(&stcb->asoc.refcnt, -1);
        if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
            ((stcb->asoc.state == 0) ||
            (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
            SCTP_TCB_UNLOCK(stcb);
            if (inp) {
                SCTP_INP_DECR_REF(inp);
            }
            return;
        }
    }
    /* record in stopped_from what timeout occurred */
    tmr->stopped_from = tmr->type;
    type = tmr->type;

    /* mark as being serviced now */
    if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
        /*
         * Callout has been rescheduled.
         */
        goto get_out;
    }
    if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
        /*
         * Not active, so no action.
         */
        goto get_out;
    }
    SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
    /* call the handler for the appropriate timer type */
    switch (tmr->type) {
    case SCTP_TIMER_TYPE_ZERO_COPY:
        if (inp == NULL) {
            break;
        }
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
            SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
        }
        break;
    case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
        if (inp == NULL) {
            break;
        }
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
            SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
        }
        break;
    case SCTP_TIMER_TYPE_ADDR_WQ:
        sctp_handle_addr_wq();
        break;
    case SCTP_TIMER_TYPE_ITERATOR:
        SCTP_STAT_INCR(sctps_timoiterator);
        sctp_iterator_timer(it);
        break;
    case SCTP_TIMER_TYPE_SEND:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        SCTP_STAT_INCR(sctps_timodata);
        stcb->asoc.timodata++;
        stcb->asoc.num_send_timers_up--;
        if (stcb->asoc.num_send_timers_up < 0) {
            stcb->asoc.num_send_timers_up = 0;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
        cur_oerr = stcb->asoc.overall_error_count;
        retcode = sctp_t3rxt_timer(inp, stcb, net);
        if (retcode) {
            /* no need to unlock on tcb its gone */
            goto out_decr;
        }
        SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
        if ((stcb->asoc.num_send_timers_up == 0) &&
            (stcb->asoc.sent_queue_cnt > 0)) {
            struct sctp_tmit_chunk *chk;

            /*
             * safeguard. If there is something on the sent queue
             * somewhere but no timers running, something is
             * wrong... so we start a timer on the first chunk
             * on the send queue on whatever net it is sent to.
             */
            chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
            sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
                chk->whoTo);
        }
        break;
    case SCTP_TIMER_TYPE_INIT:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        SCTP_STAT_INCR(sctps_timoinit);
        stcb->asoc.timoinit++;
        if (sctp_t1init_timer(inp, stcb, net)) {
            /* no need to unlock on tcb its gone */
            goto out_decr;
        }
        /* We do output but not here */
        did_output = 0;
        break;
    case SCTP_TIMER_TYPE_RECV:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        } {
            int abort_flag;

            SCTP_STAT_INCR(sctps_timosack);
            stcb->asoc.timosack++;
            if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
                sctp_sack_check(stcb, 0, 0, &abort_flag);
            sctp_send_sack(stcb);
        }
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
        break;
    case SCTP_TIMER_TYPE_SHUTDOWN:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        if (sctp_shutdown_timer(inp, stcb, net)) {
            /* no need to unlock on tcb its gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timoshutdown);
        stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
        break;
    case SCTP_TIMER_TYPE_HEARTBEAT:
        {
            struct sctp_nets *lnet;
            int cnt_of_unconf = 0;

            if ((stcb == NULL) || (inp == NULL)) {
                break;
            }
            SCTP_STAT_INCR(sctps_timoheartbeat);
            stcb->asoc.timoheartbeat++;
            TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
                if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
                    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
                    cnt_of_unconf++;
                }
            }
            if (cnt_of_unconf == 0) {
                if (sctp_heartbeat_timer(inp, stcb, lnet,
                    cnt_of_unconf)) {
                    /* no need to unlock on tcb its gone */
                    goto out_decr;
                }
            }
#ifdef SCTP_AUDITING_ENABLED
            sctp_auditing(4, inp, stcb, lnet);
#endif
            sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
                stcb->sctp_ep, stcb, lnet);
            sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
        }
        break;
    case SCTP_TIMER_TYPE_COOKIE:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        if (sctp_cookie_timer(inp, stcb, net)) {
            /* no need to unlock on tcb its gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timocookie);
        stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        /*
         * We consider T3 and Cookie timer pretty much the same with
         * respect to where from in chunk_output.
         */
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
        break;
    case SCTP_TIMER_TYPE_NEWCOOKIE:
        {
            struct timeval tv;
            int i, secret;

            if (inp == NULL) {
                break;
            }
            SCTP_STAT_INCR(sctps_timosecret);
            (void)SCTP_GETTIME_TIMEVAL(&tv);
            SCTP_INP_WLOCK(inp);
            inp->sctp_ep.time_of_secret_change = tv.tv_sec;
            inp->sctp_ep.last_secret_number =
                inp->sctp_ep.current_secret_number;
            inp->sctp_ep.current_secret_number++;
            if (inp->sctp_ep.current_secret_number >=
                SCTP_HOW_MANY_SECRETS) {
                inp->sctp_ep.current_secret_number = 0;
            }
            secret = (int)inp->sctp_ep.current_secret_number;
            for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
                inp->sctp_ep.secret_key[secret][i] =
                    sctp_select_initial_TSN(&inp->sctp_ep);
            }
            SCTP_INP_WUNLOCK(inp);
            sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
        }
        did_output = 0;
        break;
    case SCTP_TIMER_TYPE_PATHMTURAISE:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        SCTP_STAT_INCR(sctps_timopathmtu);
        sctp_pathmtu_timer(inp, stcb, net);
        did_output = 0;
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNACK:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        if (sctp_shutdownack_timer(inp, stcb, net)) {
            /* no need to unlock on tcb its gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timoshutdownack);
        stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
        break;
    case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        SCTP_STAT_INCR(sctps_timoshutdownguard);
        sctp_abort_an_association(inp, stcb,
            SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
        /* no need to unlock on tcb its gone */
        goto out_decr;
    case SCTP_TIMER_TYPE_STRRESET:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        if (sctp_strreset_timer(inp, stcb, net)) {
            /* no need to unlock on tcb its gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timostrmrst);
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
        break;
    case SCTP_TIMER_TYPE_EARLYFR:
        /* Need to do FR of things for net */
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        SCTP_STAT_INCR(sctps_timoearlyfr);
        sctp_early_fr_timer(inp, stcb, net);
        break;
    case SCTP_TIMER_TYPE_ASCONF:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        if (sctp_asconf_timer(inp, stcb, net)) {
            /* no need to unlock on tcb its gone */
            goto out_decr;
        }
        SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
        sctp_auditing(4, inp, stcb, net);
#endif
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
        break;
    case SCTP_TIMER_TYPE_PRIM_DELETED:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        sctp_delete_prim_timer(inp, stcb, net);
        SCTP_STAT_INCR(sctps_timodelprim);
        break;
    case SCTP_TIMER_TYPE_AUTOCLOSE:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        SCTP_STAT_INCR(sctps_timoautoclose);
        sctp_autoclose_timer(inp, stcb, net);
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
        did_output = 0;
        break;
    case SCTP_TIMER_TYPE_ASOCKILL:
        if ((stcb == NULL) || (inp == NULL)) {
            break;
        }
        SCTP_STAT_INCR(sctps_timoassockill);
        /* Can we free it yet? */
        SCTP_INP_DECR_REF(inp);
        sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        so = SCTP_INP_SO(inp);
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
        (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
        /*
         * free asoc, always unlocks (or destroy's) so prevent
         * duplicate unlock or unlock of a free mtx :-0
         */
        stcb = NULL;
        goto out_no_decr;
    case SCTP_TIMER_TYPE_INPKILL:
        SCTP_STAT_INCR(sctps_timoinpkill);
        if (inp == NULL) {
            break;
        }
        /*
         * special case, take away our increment since WE are the
         * killer
         */
        SCTP_INP_DECR_REF(inp);
        sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
        sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
            SCTP_CALLED_DIRECTLY_NOCMPSET);
        inp = NULL;
        goto out_no_decr;
    default:
        SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
            tmr->type);
        break;
    }
#ifdef SCTP_AUDITING_ENABLED
    sctp_audit_log(0xF1, (uint8_t) tmr->type);
    if (inp)
        sctp_auditing(5, inp, stcb, net);
#endif
    if ((did_output) && stcb) {
        /*
         * Now we need to clean up the control chunk chain if an
         * ECNE is on it. It must be marked as UNSENT again so next
         * call will continue to send it until such time that we get
         * a CWR, to remove it. It is, however, less likely that we
         * will find a ecn echo on the chain though.
         */
        sctp_fix_ecn_echo(&stcb->asoc);
    }
get_out:
    if (stcb) {
        SCTP_TCB_UNLOCK(stcb);
    }
out_decr:
    if (inp) {
        SCTP_INP_DECR_REF(inp);
    }
out_no_decr:
    SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
        type);
}
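
/*
 * The 0xa001-0xa006 values written to tmr->stopped_from above are
 * breadcrumbs: they record how far the handler got before bailing out,
 * which makes a post-mortem of a stale or racing timer much easier.
 */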
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int to_ticks;
    struct sctp_timer *tmr;

    if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
        return;

    to_ticks = 0;

    tmr = NULL;
    if (stcb) {
        SCTP_TCB_LOCK_ASSERT(stcb);
    }
    switch (t_type) {
    case SCTP_TIMER_TYPE_ZERO_COPY:
        tmr = &inp->sctp_ep.zero_copy_timer;
        to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
        break;
    case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
        tmr = &inp->sctp_ep.zero_copy_sendq_timer;
        to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
        break;
    case SCTP_TIMER_TYPE_ADDR_WQ:
        /* Only 1 tick away :-) */
        tmr = &SCTP_BASE_INFO(addr_wq_timer);
        to_ticks = SCTP_ADDRESS_TICK_DELAY;
        break;
    case SCTP_TIMER_TYPE_ITERATOR:
        {
            struct sctp_iterator *it;

            it = (struct sctp_iterator *)inp;
            tmr = &it->tmr;
            to_ticks = SCTP_ITERATOR_TICKS;
        }
        break;
    case SCTP_TIMER_TYPE_SEND:
        /* Here we use the RTO timer */
        {
            int rto_val;

            if ((stcb == NULL) || (net == NULL)) {
                return;
            }
            tmr = &net->rxt_timer;
            if (net->RTO == 0) {
                rto_val = stcb->asoc.initial_rto;
            } else {
                rto_val = net->RTO;
            }
            to_ticks = MSEC_TO_TICKS(rto_val);
        }
        break;
    case SCTP_TIMER_TYPE_INIT:
        /*
         * Here we use the INIT timer default, usually about 1
         * minute.
         */
        if ((stcb == NULL) || (net == NULL)) {
            return;
        }
        tmr = &net->rxt_timer;
        if (net->RTO == 0) {
            to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
        } else {
            to_ticks = MSEC_TO_TICKS(net->RTO);
        }
        break;
    case SCTP_TIMER_TYPE_RECV:
        /*
         * Here we use the Delayed-Ack timer value from the inp,
         * usually about 200ms.
         */
        if (stcb == NULL) {
            return;
        }
        tmr = &stcb->asoc.dack_timer;
        to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
        break;
    case SCTP_TIMER_TYPE_SHUTDOWN:
        /* Here we use the RTO of the destination. */
        if ((stcb == NULL) || (net == NULL)) {
            return;
        }
        if (net->RTO == 0) {
            to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
        } else {
            to_ticks = MSEC_TO_TICKS(net->RTO);
        }
        tmr = &net->rxt_timer;
        break;
    case SCTP_TIMER_TYPE_HEARTBEAT:
        /*
         * the net is used here so that we can add in the RTO. Even
         * though we use a different timer. We also add the HB timer
         * PLUS a random jitter.
         */
        if ((inp == NULL) || (stcb == NULL)) {
            return;
        } else {
            uint32_t rndval;
            uint8_t this_random;
            int cnt_of_unconf = 0;
            struct sctp_nets *lnet;

            TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
                if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
                    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
                    cnt_of_unconf++;
                }
            }
            if (cnt_of_unconf) {
                net = lnet = NULL;
                (void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
            }
            if (stcb->asoc.hb_random_idx > 3) {
                rndval = sctp_select_initial_TSN(&inp->sctp_ep);
                memcpy(stcb->asoc.hb_random_values, &rndval,
                    sizeof(stcb->asoc.hb_random_values));
                stcb->asoc.hb_random_idx = 0;
            }
            this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
            stcb->asoc.hb_random_idx++;
            stcb->asoc.hb_ect_randombit = 0;
            /*
             * this_random will be 0 - 255 ms; RTO is in ms.
             */
            if ((stcb->asoc.hb_is_disabled) &&
                (cnt_of_unconf == 0)) {
                return;
            }
            if (net) {
                int delay;

                delay = stcb->asoc.heart_beat_delay;
                TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
                    if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
                        ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
                        (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
                        delay = 0;
                    }
                }
                if (net->RTO == 0) {
                    /* Never been checked */
                    to_ticks = this_random + stcb->asoc.initial_rto + delay;
                } else {
                    /* set rto_val to the ms */
                    to_ticks = delay + net->RTO + this_random;
                }
            } else {
                if (cnt_of_unconf) {
                    to_ticks = this_random + stcb->asoc.initial_rto;
                } else {
                    to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
                }
            }
            /*
             * Now we must convert the to_ticks that are now in
             * ms to ticks.
             */
            to_ticks = MSEC_TO_TICKS(to_ticks);
            tmr = &stcb->asoc.hb_timer;
        }
        break;
2045 case SCTP_TIMER_TYPE_COOKIE
:
2047 * Here we can use the RTO timer from the network since one
2048 * RTT was compelete. If a retran happened then we will be
2049 * using the RTO initial value.
2051 if ((stcb
== NULL
) || (net
== NULL
)) {
2054 if (net
->RTO
== 0) {
2055 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
2057 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
2059 tmr
= &net
->rxt_timer
;
2061 case SCTP_TIMER_TYPE_NEWCOOKIE
:
2063 * nothing needed but the endpoint here ususually about 60
2069 tmr
= &inp
->sctp_ep
.signature_change
;
2070 to_ticks
= inp
->sctp_ep
.sctp_timeoutticks
[SCTP_TIMER_SIGNATURE
];
2072 case SCTP_TIMER_TYPE_ASOCKILL
:
2076 tmr
= &stcb
->asoc
.strreset_timer
;
2077 to_ticks
= MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT
);
2079 case SCTP_TIMER_TYPE_INPKILL
:
2081 * The inp is setup to die. We re-use the signature_chage
2082 * timer since that has stopped and we are in the GONE
2088 tmr
= &inp
->sctp_ep
.signature_change
;
2089 to_ticks
= MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT
);
2091 case SCTP_TIMER_TYPE_PATHMTURAISE
:
2093 * Here we use the value found in the EP for PMTU ususually
2096 if ((stcb
== NULL
) || (inp
== NULL
)) {
2102 to_ticks
= inp
->sctp_ep
.sctp_timeoutticks
[SCTP_TIMER_PMTU
];
2103 tmr
= &net
->pmtu_timer
;
2105 case SCTP_TIMER_TYPE_SHUTDOWNACK
:
2106 /* Here we use the RTO of the destination */
2107 if ((stcb
== NULL
) || (net
== NULL
)) {
2110 if (net
->RTO
== 0) {
2111 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
2113 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
2115 tmr
= &net
->rxt_timer
;
2117 case SCTP_TIMER_TYPE_SHUTDOWNGUARD
:
2119 * Here we use the endpoints shutdown guard timer usually
2122 if ((inp
== NULL
) || (stcb
== NULL
)) {
2125 to_ticks
= inp
->sctp_ep
.sctp_timeoutticks
[SCTP_TIMER_MAXSHUTDOWN
];
2126 tmr
= &stcb
->asoc
.shut_guard_timer
;
2128 case SCTP_TIMER_TYPE_STRRESET
:
2130 * Here the timer comes from the stcb but its value is from
2133 if ((stcb
== NULL
) || (net
== NULL
)) {
2136 if (net
->RTO
== 0) {
2137 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
2139 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
2141 tmr
= &stcb
->asoc
.strreset_timer
;
2144 case SCTP_TIMER_TYPE_EARLYFR
:
2148 if ((stcb
== NULL
) || (net
== NULL
)) {
2151 if (net
->flight_size
> net
->cwnd
) {
2152 /* no need to start */
2155 SCTP_STAT_INCR(sctps_earlyfrstart
);
2156 if (net
->lastsa
== 0) {
2157 /* Hmm no rtt estimate yet? */
2158 msec
= stcb
->asoc
.initial_rto
>> 2;
2160 msec
= ((net
->lastsa
>> 2) + net
->lastsv
) >> 1;
2162 if (msec
< SCTP_BASE_SYSCTL(sctp_early_fr_msec
)) {
2163 msec
= SCTP_BASE_SYSCTL(sctp_early_fr_msec
);
2164 if (msec
< SCTP_MINFR_MSEC_FLOOR
) {
2165 msec
= SCTP_MINFR_MSEC_FLOOR
;
2168 to_ticks
= MSEC_TO_TICKS(msec
);
2169 tmr
= &net
->fr_timer
;
2172 case SCTP_TIMER_TYPE_ASCONF
:
2174 * Here the timer comes from the stcb but its value is from
2177 if ((stcb
== NULL
) || (net
== NULL
)) {
2180 if (net
->RTO
== 0) {
2181 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
2183 to_ticks
= MSEC_TO_TICKS(net
->RTO
);
2185 tmr
= &stcb
->asoc
.asconf_timer
;
2187 case SCTP_TIMER_TYPE_PRIM_DELETED
:
2188 if ((stcb
== NULL
) || (net
!= NULL
)) {
2191 to_ticks
= MSEC_TO_TICKS(stcb
->asoc
.initial_rto
);
2192 tmr
= &stcb
->asoc
.delete_prim_timer
;
2194 case SCTP_TIMER_TYPE_AUTOCLOSE
:
2198 if (stcb
->asoc
.sctp_autoclose_ticks
== 0) {
2200 * Really an error since stcb is NOT set to
2205 to_ticks
= stcb
->asoc
.sctp_autoclose_ticks
;
2206 tmr
= &stcb
->asoc
.autoclose_timer
;
2209 SCTPDBG(SCTP_DEBUG_TIMER1
, "%s: Unknown timer type %d\n",
2210 __FUNCTION__
, t_type
);
2214 if ((to_ticks
<= 0) || (tmr
== NULL
)) {
2215 SCTPDBG(SCTP_DEBUG_TIMER1
, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2216 __FUNCTION__
, t_type
, to_ticks
, tmr
);
2219 if (SCTP_OS_TIMER_PENDING(&tmr
->timer
)) {
2221 * we do NOT allow you to have it already running. if it is
2222 * we leave the current one up unchanged
2226 /* At this point we can proceed */
2227 if (t_type
== SCTP_TIMER_TYPE_SEND
) {
2228 stcb
->asoc
.num_send_timers_up
++;
2230 tmr
->stopped_from
= 0;
2232 tmr
->ep
= (void *)inp
;
2233 tmr
->tcb
= (void *)stcb
;
2234 tmr
->net
= (void *)net
;
2235 tmr
->self
= (void *)tmr
;
2236 tmr
->ticks
= sctp_get_tick_count();
2237 (void)SCTP_OS_TIMER_START(&tmr
->timer
, to_ticks
, sctp_timeout_handler
, tmr
);
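
/*
 * Stop a running timer. Note the tmr->type check below: several timer
 * types share one sctp_timer structure (for example the init, cookie,
 * send and shutdown timers all live in net->rxt_timer), so a stop request
 * for a type that does not currently own the shared timer is silently
 * ignored.
 */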
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped, so just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
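
/*
 * Checksum support. Three compile-time variants follow: SCTP_WITH_NO_CSUM
 * stubs the computation out entirely, SCTP_USE_INCHKSUM delegates to the
 * machine's in_cksum_skip() routine, and the default build computes
 * CRC-32c (or Adler-32 when SCTP_USE_ADLER32 is defined, using
 * update_adler32() below).
 */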
#ifdef SCTP_USE_ADLER32
static uint32_t
update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
{
	uint32_t s1 = adler & 0xffff;
	uint32_t s2 = (adler >> 16) & 0xffff;
	int n;

	for (n = 0; n < len; n++, buf++) {
		/* s1 = (s1 + buf[n]) % BASE */
		/* first we add */
		s1 = (s1 + *buf);
		/*
		 * now if we need to, we do a mod by subtracting. It seems a
		 * bit faster since I really will only ever do one subtract
		 * at the MOST, since buf[n] is a max of 255.
		 */
		if (s1 >= SCTP_ADLER32_BASE) {
			s1 -= SCTP_ADLER32_BASE;
		}
		/* s2 = (s2 + s1) % BASE */
		/* first we add */
		s2 = (s2 + s1);
		/*
		 * again, it is more efficient (it seems) to subtract since
		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
		 * worst case. This would then be (2 * BASE) - 2, which will
		 * still only do one subtract. On Intel this is much better
		 * to do this way and avoid the divide. Have not -pg'd on
		 * sparc.
		 */
		if (s2 >= SCTP_ADLER32_BASE) {
			s2 -= SCTP_ADLER32_BASE;
		}
	}
	/* Return the adler32 of the bytes buf[0..len-1] */
	return ((s2 << 16) + s1);
}

#endif

uint32_t
sctp_calculate_len(struct mbuf *m)
{
	uint32_t tlen = 0;
	struct mbuf *at;

	at = m;
	while (at) {
		tlen += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	return (tlen);
}

#if defined(SCTP_WITH_NO_CSUM)

uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	*pktlen = sctp_calculate_len(m);
	return (0);
}

#elif defined(SCTP_USE_INCHKSUM)

#include <machine/in_cksum.h>

uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;
	struct mbuf *at;
	uint32_t the_sum, retsum;

	at = m;
	while (at) {
		tlen += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}
	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));

	*pktlen = (tlen - offset);
	retsum = htons(the_sum);
	return (retsum);
}

#else

uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;

#else
	uint32_t base = 0xffffffff;

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		if (offset) {
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}

#endif

void
sctp_mtu_size_reset(struct sctp_inpcb *inp,
    struct sctp_association *asoc, uint32_t mtu)
{
	/*
	 * Reset the P-MTU size on this association; this involves changing
	 * the asoc MTU and going through ANY chunk+overhead larger than
	 * mtu to allow the DF flag to be cleared.
	 */
	struct sctp_tmit_chunk *chk;
	unsigned int eff_mtu, ovh;

#ifdef SCTP_PRINT_FOR_B_AND_M
	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
	    inp, asoc, mtu);
#endif
	asoc->smallest_mtu = mtu;
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MIN_OVERHEAD;
	} else {
		ovh = SCTP_MIN_V4_OVERHEAD;
	}
	eff_mtu = mtu - ovh;
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
}
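
/*
 * The RTO smoothing in sctp_calculate_rto() below is Van Jacobson's
 * integer algorithm: net->lastsa holds a scaled SRTT and net->lastsv a
 * scaled RTT variance, so that
 *
 *	new_rto = (lastsa >> SCTP_RTT_SHIFT) + lastsv
 *
 * corresponds to the classic RTO = SRTT + 4 * RTTVAR when SCTP_RTT_SHIFT
 * is 3 and SCTP_RTT_VAR_SHIFT is 2 (the usual values). The result is then
 * clamped to the association's [minrto, maxrto] range per rules C6/C7.
 */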
/*
 * Given an association and the starting time of the current RTT period,
 * return the RTO in number of msecs. net should point to the current
 * network.
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurement */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
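
/*
 * Typical usage of sctp_m_getptr() (as in sctp_handle_ootb() later in this
 * file): pull a chunk header out of a packet that may be split across
 * mbufs, using a stack buffer as the bounce area:
 *
 *	struct sctp_chunkhdr *ch, chunk_buf;
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	    sizeof(*ch), (uint8_t *) & chunk_buf);
 *
 * The returned pointer is either into the mbuf itself (when the span is
 * contiguous) or into the caller's buffer, so it is only valid while both
 * remain alive.
 */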
/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes. If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer
 * size is >= 'len'. returns NULL if there isn't 'len' bytes in the chain.
 */
caddr_t
sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
{
	uint32_t count;
	uint8_t *ptr;

	ptr = in_ptr;
	if ((off < 0) || (len <= 0))
		return (NULL);

	/* find the desired start location */
	while ((m != NULL) && (off > 0)) {
		if (off < SCTP_BUF_LEN(m))
			break;
		off -= SCTP_BUF_LEN(m);
		m = SCTP_BUF_NEXT(m);
	}
	if (m == NULL)
		return (NULL);

	/* is the current mbuf large enough (eg. contiguous)? */
	if ((SCTP_BUF_LEN(m) - off) >= len) {
		return (mtod(m, caddr_t)+off);
	} else {
		/* else, it spans more than one mbuf, so save a temp copy... */
		while ((m != NULL) && (len > 0)) {
			count = min(SCTP_BUF_LEN(m) - off, len);
			bcopy(mtod(m, caddr_t)+off, ptr, count);
			len -= count;
			ptr += count;
			off = 0;
			m = SCTP_BUF_NEXT(m);
		}
		if ((m == NULL) && (len > 0))
			return (NULL);
		else
			return ((caddr_t)in_ptr);
	}
}

struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (uint8_t *) pull));
}

int
sctp_add_pad_tombuf(struct mbuf *m, int padlen)
{
	/*
	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
	 * padlen is > 3 this routine will fail.
	 */
	uint8_t *dp;
	int i;

	if (padlen > 3) {
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
		return (ENOBUFS);
	}
	if (padlen <= M_TRAILINGSPACE(m)) {
		/*
		 * The easy way. We hope the majority of the time we hit
		 * here :)
		 */
		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		SCTP_BUF_LEN(m) += padlen;
	} else {
		/* Hard way we must grow the mbuf */
		struct mbuf *tmp;

		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
		if (tmp == NULL) {
			/* Out of space GAK! we are in big trouble. */
			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			return (ENOSPC);
		}
		/* setup and insert in middle */
		SCTP_BUF_LEN(tmp) = padlen;
		SCTP_BUF_NEXT(tmp) = NULL;
		SCTP_BUF_NEXT(m) = tmp;
		dp = mtod(tmp, uint8_t *);
	}
	/* zero out the pad */
	for (i = 0; i < padlen; i++) {
		*dp = 0;
		dp++;
	}
	return (0);
}
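
/*
 * SCTP chunks are padded to a 4-byte boundary on the wire, so the pad
 * appended here is always 0-3 bytes; sctp_add_pad_tombuf() above rejects
 * anything larger.
 */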
int
sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
{
	/* find the last mbuf in chain and pad it */
	struct mbuf *m_at;

	m_at = m;
	if (last_mbuf) {
		return (sctp_add_pad_tombuf(last_mbuf, padval));
	} else {
		while (m_at) {
			if (SCTP_BUF_NEXT(m_at) == NULL) {
				return (sctp_add_pad_tombuf(m_at, padval));
			}
			m_at = SCTP_BUF_NEXT(m_at);
		}
	}
	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
	return (EFAULT);
}
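
/*
 * Notification helpers. Each sctp_notify_*() routine below builds the
 * corresponding sockets-API event in an mbuf, wraps it in a
 * sctp_queued_to_read entry flagged M_NOTIFICATION, and appends it to the
 * owning socket's receive queue (usually via sctp_add_to_readq()).
 */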
int sctp_asoc_change_wake = 0;

static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * First if we are going down dump everything we can to the
	 * socket rcv queue.
	 */

	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* If the socket is gone we are out of here */
		return;
	}
	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
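
/*
 * Build an SCTP_SEND_FAILED notification for a chunk that could not be
 * delivered. The data chunk header is stripped off and the remaining
 * payload is handed back to the application, appended to the notification
 * header.
 */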
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify, *tt;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	if (chk->data) {
		/* Take off the chunk header */
		m_adj(chk->data, sizeof(struct sctp_data_chunk));

		/* trim out any 0 len mbufs */
		while (SCTP_BUF_LEN(chk->data) == 0) {
			tt = chk->data;
			chk->data = SCTP_BUF_NEXT(tt);
			SCTP_BUF_NEXT(tt) = NULL;
			sctp_m_freem(tt);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
static void
sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
    uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_adaptation_event *sai;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	sai = mtod(m_notify, struct sctp_adaptation_event *);
	sai->sai_type = SCTP_ADAPTATION_INDICATION;
	sai->sai_flags = 0;
	sai->sai_length = sizeof(struct sctp_adaptation_event);
	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
	sai->sai_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
/*
 * The INP read-queue lock must already be held by the caller when 'nolock'
 * is set; otherwise the lock is taken (and released) here.
 */
void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    int nolock, uint32_t val)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
	    sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	control->length = 0;
	if (nolock == 0) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (nolock == 0) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
}

static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when a SHUTDOWN completes.
	 */
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
		socantrcvmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if (stcb == NULL) {
		return;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	if (number_entries == 0) {
		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
	} else {
		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
	}
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		for (i = 0; i < number_entries; i++) {
			strreset->strreset_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
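
/*
 * Central notification dispatcher: maps an SCTP_NOTIFY_* code (plus its
 * type-specific 'data' argument) onto one of the sctp_notify_*() helpers
 * above. Nothing is delivered while the socket is gone or closed, and
 * interface up/down/confirmed events are suppressed while still in the
 * front (COOKIE) states.
 */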
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* No notifications up when we are in a no socket state */
		return;
	}
	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
		/* Can't send up to a closed socket any notifications */
		return;
	}
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb, error);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_DG_FAIL:
		sctp_notify_send_failed(stcb, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
		}
		break;
	case SCTP_NOTIFY_STRDATA_ERR:
		break;
	case SCTP_NOTIFY_ASSOC_ABORTED:
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_PEER_OPENED_STREAM:
		break;
	case SCTP_NOTIFY_STREAM_OPENED_OK:
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
		break;
	case SCTP_NOTIFY_HB_RESP:
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SUCCESS:
		break;
	case SCTP_NOTIFY_ASCONF_FAILED:
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
		    (uint16_t) (uintptr_t) data);
		break;
#if 0
	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
		    error, (uint16_t) (uintptr_t) data);
		break;
#endif				/* not yet? remove? */
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
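
/*
 * Flush everything the association still has queued outbound: the sent
 * queue, the pending send queue, and every per-stream output queue. Each
 * message is reported to the ULP as a send failure (DATAGRAM_SENT for
 * chunks that reached the wire at least once, DATAGRAM_UNSENT otherwise)
 * before its buffers are released. Used during abort/teardown.
 */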
static void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_queue_pending *sp;
	int i;

	asoc = &stcb->asoc;
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		chk = TAILQ_FIRST(&asoc->sent_queue);
		while (chk) {
			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
			asoc->sent_queue_cnt--;
			if (chk->data) {
				/*
				 * trim off the sctp chunk header (it should
				 * be there)
				 */
				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
					m_adj(chk->data, sizeof(struct sctp_data_chunk));
					sctp_mbuf_crush(chk->data);
					chk->send_size -= sizeof(struct sctp_data_chunk);
				}
			}
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->sent_queue);
		}
	}
	/* pending send queue SHOULD be empty */
	if (!TAILQ_EMPTY(&asoc->send_queue)) {
		chk = TAILQ_FIRST(&asoc->send_queue);
		while (chk) {
			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
			asoc->send_queue_cnt--;
			if (chk->data) {
				/*
				 * trim off the sctp chunk header (it should
				 * be there)
				 */
				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
					m_adj(chk->data, sizeof(struct sctp_data_chunk));
					sctp_mbuf_crush(chk->data);
					chk->send_size -= sizeof(struct sctp_data_chunk);
				}
			}
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk);
			/* sa_ignore FREED_MEMORY */
			chk = TAILQ_FIRST(&asoc->send_queue);
		}
	}
	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
		/* For each stream */
		outs = &stcb->asoc.strmout[i];
		/* clean up any sends there */
		stcb->asoc.locked_on_sending = NULL;
		sp = TAILQ_FIRST(&outs->outqueue);
		while (sp) {
			stcb->asoc.stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
			if (sp->data) {
				sctp_m_freem(sp->data);
				sp->data = NULL;
			}
			if (sp->net)
				sctp_free_remote_addr(sp->net);
			sp->net = NULL;
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp);
			/* sa_ignore FREED_MEMORY */
			sp = TAILQ_FIRST(&outs->outqueue);
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}

void
sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, 1, so_locked);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
}

void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
#endif

void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
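
/*
 * Handle an "out of the blue" packet, i.e. one that matches no existing
 * association (RFC 4960, Section 8.4): never respond to a COOKIE-ECHO,
 * PACKET-DROPPED, ABORT or SHUTDOWN-COMPLETE, answer a SHUTDOWN-ACK with
 * a SHUTDOWN-COMPLETE, and ABORT anything else.
 */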
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
}
/*
 * check the inbound datagram to make sure there is not an abort inside it,
 * if there is return 1, else return 0.
 */
int
sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
{
	struct sctp_chunkhdr *ch;
	struct sctp_init_chunk *init_chk, chunk_buf;
	int offset;
	unsigned int chk_length;

	offset = iphlen + sizeof(struct sctphdr);
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
	    (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* packet is probably corrupt */
			break;
		}
		/* we seem to be ok, is it an abort? */
		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
			/* yep, tell them */
			return (1);
		}
		if (ch->chunk_type == SCTP_INITIATION) {
			/* need to update the Vtag */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
			if (init_chk != NULL) {
				*vtagfill = ntohl(init_chk->init.initiate_tag);
			}
		}
		/* Nope, move to the next chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	return (0);
}
/*
 * Currently (2/02) ifa_addr embeds the scope_id and does not have
 * sin6_scope_id set (i.e. it is 0), so this function compares link-local
 * scopes directly.
 */
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 a, b;

	/* save copies */
	a = *addr1;
	b = *addr2;

	if (a.sin6_scope_id == 0)
		if (sa6_recoverscope(&a)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (b.sin6_scope_id == 0)
		if (sa6_recoverscope(&b)) {
			/* can't get scope, so can't match */
			return (0);
		}
	if (a.sin6_scope_id != b.sin6_scope_id)
		return (0);

	return (1);
}

/*
 * returns a sockaddr_in6 with embedded scope recovered and removed
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}

/*
 * are the two addresses the same? currently a "scopeless" check; returns
 * 1 if same, 0 if not.
 */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{

	/* must be valid */
	if (sa1 == NULL || sa2 == NULL)
		return (0);

	/* must be the same family */
	if (sa1->sa_family != sa2->sa_family)
		return (0);

	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			/* IPv6 addresses */
			struct sockaddr_in6 *sin6_1, *sin6_2;

			sin6_1 = (struct sockaddr_in6 *)sa1;
			sin6_2 = (struct sockaddr_in6 *)sa2;
			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
			    sin6_2));
		}
#endif
	case AF_INET:
		{
			/* IPv4 addresses */
			struct sockaddr_in *sin_1, *sin_2;

			sin_1 = (struct sockaddr_in *)sa1;
			sin_2 = (struct sockaddr_in *)sa2;
			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
		}
	default:
		/* we don't do these... */
		return (0);
	}
}

void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
#endif
	case AF_INET:
		{
			struct sockaddr_in *sin;
			unsigned char *p;

			sin = (struct sockaddr_in *)sa;
			p = (unsigned char *)&sin->sin_addr;
			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
			break;
		}
	default:
		break;
	}
}

void
sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
{
	switch (iph->ip_v) {
	case IPVERSION:
		{
			struct sockaddr_in lsa, fsa;

			bzero(&lsa, sizeof(lsa));
			lsa.sin_len = sizeof(lsa);
			lsa.sin_family = AF_INET;
			lsa.sin_addr = iph->ip_src;
			lsa.sin_port = sh->src_port;
			bzero(&fsa, sizeof(fsa));
			fsa.sin_len = sizeof(fsa);
			fsa.sin_family = AF_INET;
			fsa.sin_addr = iph->ip_dst;
			fsa.sin_port = sh->dest_port;
			SCTP_PRINTF("src: ");
			sctp_print_address((struct sockaddr *)&lsa);
			SCTP_PRINTF("dest: ");
			sctp_print_address((struct sockaddr *)&fsa);
			break;
		}
#ifdef INET6
	case IPV6_VERSION >> 4:
		{
			struct ip6_hdr *ip6;
			struct sockaddr_in6 lsa6, fsa6;

			ip6 = (struct ip6_hdr *)iph;
			bzero(&lsa6, sizeof(lsa6));
			lsa6.sin6_len = sizeof(lsa6);
			lsa6.sin6_family = AF_INET6;
			lsa6.sin6_addr = ip6->ip6_src;
			lsa6.sin6_port = sh->src_port;
			bzero(&fsa6, sizeof(fsa6));
			fsa6.sin6_len = sizeof(fsa6);
			fsa6.sin6_family = AF_INET6;
			fsa6.sin6_addr = ip6->ip6_dst;
			fsa6.sin6_port = sh->dest_port;
			SCTP_PRINTF("src: ");
			sctp_print_address((struct sockaddr *)&lsa6);
			SCTP_PRINTF("dest: ");
			sctp_print_address((struct sockaddr *)&fsa6);
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
}
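
/*
 * Used by the peeloff/accept code path: any read-queue entries that belong
 * to the association being peeled off are migrated from the old endpoint's
 * socket to the new one, with the socket-buffer accounting moved along
 * with each mbuf (sbfree on the old socket, sballoc on the new).
 */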
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, it's a
		 * screwed up app that does a peeloff OR an accept while
		 * reading from the main socket... actually it's only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for our target stcb */
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4360 sctp_add_to_readq(struct sctp_inpcb
*inp
,
4361 struct sctp_tcb
*stcb
,
4362 struct sctp_queued_to_read
*control
,
4366 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4372 * Here we must place the control on the end of the socket read
4373 * queue AND increment sb_cc so that select will work properly on
4376 struct mbuf
*m
, *prev
= NULL
;
4381 panic("Gak, inp NULL on add_to_readq");
4385 SCTP_INP_READ_LOCK(inp
);
4386 if (!(control
->spec_flags
& M_NOTIFICATION
)) {
4387 atomic_add_int(&inp
->total_recvs
, 1);
4388 if (!control
->do_not_ref_stcb
) {
4389 atomic_add_int(&stcb
->total_recvs
, 1);
4393 control
->held_length
= 0;
4394 control
->length
= 0;
4396 if (SCTP_BUF_LEN(m
) == 0) {
4397 /* Skip mbufs with NO length */
4400 control
->data
= sctp_m_free(m
);
4403 SCTP_BUF_NEXT(prev
) = sctp_m_free(m
);
4404 m
= SCTP_BUF_NEXT(prev
);
4407 control
->tail_mbuf
= prev
;;
4412 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
4413 sctp_sblog(sb
, control
->do_not_ref_stcb
? NULL
: stcb
, SCTP_LOG_SBALLOC
, SCTP_BUF_LEN(m
));
4415 sctp_sballoc(stcb
, sb
, m
);
4416 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
4417 sctp_sblog(sb
, control
->do_not_ref_stcb
? NULL
: stcb
, SCTP_LOG_SBRESULT
, 0);
4419 atomic_add_int(&control
->length
, SCTP_BUF_LEN(m
));
4420 m
= SCTP_BUF_NEXT(m
);
4423 control
->tail_mbuf
= prev
;
4425 /* Everything got collapsed out?? */
4429 control
->end_added
= 1;
4431 TAILQ_INSERT_TAIL(&inp
->read_queue
, control
, next
);
4432 SCTP_INP_READ_UNLOCK(inp
);
4433 if (inp
&& inp
->sctp_socket
) {
4434 if (sctp_is_feature_on(inp
, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE
)) {
4435 SCTP_ZERO_COPY_EVENT(inp
, inp
->sctp_socket
);
4437 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4440 so
= SCTP_INP_SO(inp
);
4442 atomic_add_int(&stcb
->asoc
.refcnt
, 1);
4443 SCTP_TCB_UNLOCK(stcb
);
4444 SCTP_SOCKET_LOCK(so
, 1);
4445 SCTP_TCB_LOCK(stcb
);
4446 atomic_subtract_int(&stcb
->asoc
.refcnt
, 1);
4447 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
4448 SCTP_SOCKET_UNLOCK(so
, 1);
4453 sctp_sorwakeup(inp
, inp
->sctp_socket
);
4454 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4456 SCTP_SOCKET_UNLOCK(so
, 1);
4465 sctp_append_to_readq(struct sctp_inpcb
*inp
,
4466 struct sctp_tcb
*stcb
,
4467 struct sctp_queued_to_read
*control
,
4474 * A partial delivery API event is underway. OR we are appending on
4475 * the reassembly queue.
4477 * If PDAPI this means we need to add m to the end of the data.
4478 * Increase the length in the control AND increment the sb_cc.
4479 * Otherwise sb is NULL and all we need to do is put it at the end
4480 * of the mbuf chain.
4483 struct mbuf
*mm
, *tail
= NULL
, *prev
= NULL
;
4486 SCTP_INP_READ_LOCK(inp
);
4488 if (control
== NULL
) {
4491 SCTP_INP_READ_UNLOCK(inp
);
4495 if (control
->end_added
) {
4496 /* huh this one is complete? */
4504 if (SCTP_BUF_LEN(mm
) == 0) {
4505 /* Skip mbufs with NO lenght */
4508 m
= sctp_m_free(mm
);
4511 SCTP_BUF_NEXT(prev
) = sctp_m_free(mm
);
4512 mm
= SCTP_BUF_NEXT(prev
);
4517 len
+= SCTP_BUF_LEN(mm
);
4519 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
4520 sctp_sblog(sb
, control
->do_not_ref_stcb
? NULL
: stcb
, SCTP_LOG_SBALLOC
, SCTP_BUF_LEN(mm
));
4522 sctp_sballoc(stcb
, sb
, mm
);
4523 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
4524 sctp_sblog(sb
, control
->do_not_ref_stcb
? NULL
: stcb
, SCTP_LOG_SBRESULT
, 0);
4527 mm
= SCTP_BUF_NEXT(mm
);
4532 /* Really there should always be a prev */
4534 /* Huh nothing left? */
4536 panic("Nothing left to add?");
4543 if (control
->tail_mbuf
) {
4545 SCTP_BUF_NEXT(control
->tail_mbuf
) = m
;
4546 control
->tail_mbuf
= tail
;
4550 if (control
->data
!= NULL
) {
4551 panic("This should NOT happen");
4555 control
->tail_mbuf
= tail
;
4557 atomic_add_int(&control
->length
, len
);
4559 /* message is complete */
4560 if (stcb
&& (control
== stcb
->asoc
.control_pdapi
)) {
4561 stcb
->asoc
.control_pdapi
= NULL
;
4563 control
->held_length
= 0;
4564 control
->end_added
= 1;
4567 control
->do_not_ref_stcb
= 1;
4570 * When we are appending in partial delivery, the cum-ack is used
4571 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4572 * is populated in the outbound sinfo structure from the true cumack
4573 * if the association exists...
4575 control
->sinfo_tsn
= control
->sinfo_cumtsn
= ctls_cumack
;
4577 SCTP_INP_READ_UNLOCK(inp
);
4579 if (inp
&& inp
->sctp_socket
) {
4580 if (sctp_is_feature_on(inp
, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE
)) {
4581 SCTP_ZERO_COPY_EVENT(inp
, inp
->sctp_socket
);
4583 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4586 so
= SCTP_INP_SO(inp
);
4587 atomic_add_int(&stcb
->asoc
.refcnt
, 1);
4588 SCTP_TCB_UNLOCK(stcb
);
4589 SCTP_SOCKET_LOCK(so
, 1);
4590 SCTP_TCB_LOCK(stcb
);
4591 atomic_subtract_int(&stcb
->asoc
.refcnt
, 1);
4592 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
4593 SCTP_SOCKET_UNLOCK(so
, 1);
4597 sctp_sorwakeup(inp
, inp
->sctp_socket
);
4598 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4599 SCTP_SOCKET_UNLOCK(so
, 1);
4608 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4609 *************ALTERNATE ROUTING CODE
4612 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4613 *************ALTERNATE ROUTING CODE
4617 sctp_generate_invmanparam(int err
)
4619 /* Return a MBUF with a invalid mandatory parameter */
4622 m
= sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr
), 0, M_DONTWAIT
, 1, MT_DATA
);
4624 struct sctp_paramhdr
*ph
;
4626 SCTP_BUF_LEN(m
) = sizeof(struct sctp_paramhdr
);
4627 ph
= mtod(m
, struct sctp_paramhdr
*);
4628 ph
->param_length
= htons(sizeof(struct sctp_paramhdr
));
4629 ph
->param_type
= htons(err
);
4634 #ifdef SCTP_MBCNT_LOGGING
4636 sctp_free_bufspace(struct sctp_tcb
*stcb
, struct sctp_association
*asoc
,
4637 struct sctp_tmit_chunk
*tp1
, int chk_cnt
)
4639 if (tp1
->data
== NULL
) {
4642 asoc
->chunks_on_out_queue
-= chk_cnt
;
4643 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_MBCNT_LOGGING_ENABLE
) {
4644 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE
,
4645 asoc
->total_output_queue_size
,
4650 if (asoc
->total_output_queue_size
>= tp1
->book_size
) {
4651 atomic_add_int(&asoc
->total_output_queue_size
, -tp1
->book_size
);
4653 asoc
->total_output_queue_size
= 0;
4656 if (stcb
->sctp_socket
&& (((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) ||
4657 ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
)))) {
4658 if (stcb
->sctp_socket
->so_snd
.sb_cc
>= tp1
->book_size
) {
4659 stcb
->sctp_socket
->so_snd
.sb_cc
-= tp1
->book_size
;
4661 stcb
->sctp_socket
->so_snd
.sb_cc
= 0;
4670 sctp_release_pr_sctp_chunk(struct sctp_tcb
*stcb
, struct sctp_tmit_chunk
*tp1
,
4671 int reason
, struct sctpchunk_listhead
*queue
, int so_locked
4672 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4679 uint8_t foundeom
= 0;
4682 ret_sz
+= tp1
->book_size
;
4683 tp1
->sent
= SCTP_FORWARD_TSN_SKIP
;
4685 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4689 sctp_free_bufspace(stcb
, &stcb
->asoc
, tp1
, 1);
4690 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL
, stcb
, reason
, tp1
, SCTP_SO_NOT_LOCKED
);
4691 sctp_m_freem(tp1
->data
);
4693 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4694 so
= SCTP_INP_SO(stcb
->sctp_ep
);
4696 atomic_add_int(&stcb
->asoc
.refcnt
, 1);
4697 SCTP_TCB_UNLOCK(stcb
);
4698 SCTP_SOCKET_LOCK(so
, 1);
4699 SCTP_TCB_LOCK(stcb
);
4700 atomic_subtract_int(&stcb
->asoc
.refcnt
, 1);
4701 if (stcb
->asoc
.state
& SCTP_STATE_CLOSED_SOCKET
) {
4703 * assoc was freed while we were
4706 SCTP_SOCKET_UNLOCK(so
, 1);
4711 sctp_sowwakeup(stcb
->sctp_ep
, stcb
->sctp_socket
);
4712 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4714 SCTP_SOCKET_UNLOCK(so
, 1);
4718 if (PR_SCTP_BUF_ENABLED(tp1
->flags
)) {
4719 stcb
->asoc
.sent_queue_cnt_removeable
--;
4721 if (queue
== &stcb
->asoc
.send_queue
) {
4722 TAILQ_REMOVE(&stcb
->asoc
.send_queue
, tp1
, sctp_next
);
4723 /* on to the sent queue */
4724 TAILQ_INSERT_TAIL(&stcb
->asoc
.sent_queue
, tp1
,
4726 stcb
->asoc
.sent_queue_cnt
++;
4728 if ((tp1
->rec
.data
.rcv_flags
& SCTP_DATA_NOT_FRAG
) ==
4729 SCTP_DATA_NOT_FRAG
) {
4730 /* not frag'ed we ae done */
4733 } else if (tp1
->rec
.data
.rcv_flags
& SCTP_DATA_LAST_FRAG
) {
4734 /* end of frag, we are done */
4739 * Its a begin or middle piece, we must mark all of
4743 tp1
= TAILQ_NEXT(tp1
, sctp_next
);
4745 } while (tp1
&& notdone
);
4746 if ((foundeom
== 0) && (queue
== &stcb
->asoc
.sent_queue
)) {
4748 * The multi-part message was scattered across the send and
4751 tp1
= TAILQ_FIRST(&stcb
->asoc
.send_queue
);
4753 * recurse throught the send_queue too, starting at the
4757 ret_sz
+= sctp_release_pr_sctp_chunk(stcb
, tp1
, reason
,
4758 &stcb
->asoc
.send_queue
, so_locked
);
4760 SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
4767 * checks to see if the given address, sa, is one that is currently known by
4768 * the kernel note: can't distinguish the same address on multiple interfaces
4769 * and doesn't handle multiple addresses with different zone/scope id's note:
4770 * ifa_ifwithaddr() compares the entire sockaddr struct
4773 sctp_find_ifa_in_ep(struct sctp_inpcb
*inp
, struct sockaddr
*addr
,
4776 struct sctp_laddr
*laddr
;
4778 if (holds_lock
== 0) {
4779 SCTP_INP_RLOCK(inp
);
4781 LIST_FOREACH(laddr
, &inp
->sctp_addr_list
, sctp_nxt_addr
) {
4782 if (laddr
->ifa
== NULL
)
4784 if (addr
->sa_family
!= laddr
->ifa
->address
.sa
.sa_family
)
4786 if (addr
->sa_family
== AF_INET
) {
4787 if (((struct sockaddr_in
*)addr
)->sin_addr
.s_addr
==
4788 laddr
->ifa
->address
.sin
.sin_addr
.s_addr
) {
4790 if (holds_lock
== 0) {
4791 SCTP_INP_RUNLOCK(inp
);
4793 return (laddr
->ifa
);
4798 if (addr
->sa_family
== AF_INET6
) {
4799 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6
*)addr
,
4800 &laddr
->ifa
->address
.sin6
)) {
4802 if (holds_lock
== 0) {
4803 SCTP_INP_RUNLOCK(inp
);
4805 return (laddr
->ifa
);
4811 if (holds_lock
== 0) {
4812 SCTP_INP_RUNLOCK(inp
);
4818 sctp_get_ifa_hash_val(struct sockaddr
*addr
)
4820 if (addr
->sa_family
== AF_INET
) {
4821 struct sockaddr_in
*sin
;
4823 sin
= (struct sockaddr_in
*)addr
;
4824 return (sin
->sin_addr
.s_addr
^ (sin
->sin_addr
.s_addr
>> 16));
4825 } else if (addr
->sa_family
== AF_INET6
) {
4826 struct sockaddr_in6
*sin6
;
4827 uint32_t hash_of_addr
;
4829 sin6
= (struct sockaddr_in6
*)addr
;
4830 hash_of_addr
= (sin6
->sin6_addr
.s6_addr32
[0] +
4831 sin6
->sin6_addr
.s6_addr32
[1] +
4832 sin6
->sin6_addr
.s6_addr32
[2] +
4833 sin6
->sin6_addr
.s6_addr32
[3]);
4834 hash_of_addr
= (hash_of_addr
^ (hash_of_addr
>> 16));
4835 return (hash_of_addr
);
4841 sctp_find_ifa_by_addr(struct sockaddr
*addr
, uint32_t vrf_id
, int holds_lock
)
4843 struct sctp_ifa
*sctp_ifap
;
4844 struct sctp_vrf
*vrf
;
4845 struct sctp_ifalist
*hash_head
;
4846 uint32_t hash_of_addr
;
4848 if (holds_lock
== 0)
4849 SCTP_IPI_ADDR_RLOCK();
4851 vrf
= sctp_find_vrf(vrf_id
);
4854 if (holds_lock
== 0)
4855 SCTP_IPI_ADDR_RUNLOCK();
4858 hash_of_addr
= sctp_get_ifa_hash_val(addr
);
4860 hash_head
= &vrf
->vrf_addr_hash
[(hash_of_addr
& vrf
->vrf_addr_hashmark
)];
4861 if (hash_head
== NULL
) {
4862 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4863 hash_of_addr
, (uint32_t) vrf
->vrf_addr_hashmark
,
4864 (uint32_t) (hash_of_addr
& vrf
->vrf_addr_hashmark
));
4865 sctp_print_address(addr
);
4866 SCTP_PRINTF("No such bucket for address\n");
4867 if (holds_lock
== 0)
4868 SCTP_IPI_ADDR_RUNLOCK();
4872 LIST_FOREACH(sctp_ifap
, hash_head
, next_bucket
) {
4873 if (sctp_ifap
== NULL
) {
4875 panic("Huh LIST_FOREACH corrupt");
4878 SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4882 if (addr
->sa_family
!= sctp_ifap
->address
.sa
.sa_family
)
4884 if (addr
->sa_family
== AF_INET
) {
4885 if (((struct sockaddr_in
*)addr
)->sin_addr
.s_addr
==
4886 sctp_ifap
->address
.sin
.sin_addr
.s_addr
) {
4888 if (holds_lock
== 0)
4889 SCTP_IPI_ADDR_RUNLOCK();
4895 if (addr
->sa_family
== AF_INET6
) {
4896 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6
*)addr
,
4897 &sctp_ifap
->address
.sin6
)) {
4899 if (holds_lock
== 0)
4900 SCTP_IPI_ADDR_RUNLOCK();
4907 if (holds_lock
== 0)
4908 SCTP_IPI_ADDR_RUNLOCK();
4913 sctp_user_rcvd(struct sctp_tcb
*stcb
, uint32_t * freed_so_far
, int hold_rlock
,
4916 /* User pulled some data, do we need a rwnd update? */
4919 struct socket
*so
= NULL
;
4924 atomic_add_int(&stcb
->asoc
.refcnt
, 1);
4926 if (stcb
->asoc
.state
& (SCTP_STATE_ABOUT_TO_BE_FREED
|
4927 SCTP_STATE_SHUTDOWN_RECEIVED
|
4928 SCTP_STATE_SHUTDOWN_ACK_SENT
)) {
4929 /* Pre-check If we are freeing no update */
4932 SCTP_INP_INCR_REF(stcb
->sctp_ep
);
4933 if ((stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
4934 (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_ALLGONE
)) {
4937 so
= stcb
->sctp_socket
;
4941 atomic_add_int(&stcb
->freed_by_sorcv_sincelast
, *freed_so_far
);
4942 /* Have you have freed enough to look */
4944 /* Yep, its worth a look and the lock overhead */
4946 /* Figure out what the rwnd would be */
4947 rwnd
= sctp_calc_rwnd(stcb
, &stcb
->asoc
);
4948 if (rwnd
>= stcb
->asoc
.my_last_reported_rwnd
) {
4949 dif
= rwnd
- stcb
->asoc
.my_last_reported_rwnd
;
4953 if (dif
>= rwnd_req
) {
4955 SCTP_INP_READ_UNLOCK(stcb
->sctp_ep
);
4958 if (stcb
->asoc
.state
& SCTP_STATE_ABOUT_TO_BE_FREED
) {
4960 * One last check before we allow the guy possibly
4961 * to get in. There is a race, where the guy has not
4962 * reached the gate. In that case
4966 SCTP_TCB_LOCK(stcb
);
4967 if (stcb
->asoc
.state
& SCTP_STATE_ABOUT_TO_BE_FREED
) {
4968 /* No reports here */
4969 SCTP_TCB_UNLOCK(stcb
);
4972 SCTP_STAT_INCR(sctps_wu_sacks_sent
);
4973 sctp_send_sack(stcb
);
4974 sctp_chunk_output(stcb
->sctp_ep
, stcb
,
4975 SCTP_OUTPUT_FROM_USR_RCVD
, SCTP_SO_LOCKED
);
4976 /* make sure no timer is running */
4977 sctp_timer_stop(SCTP_TIMER_TYPE_RECV
, stcb
->sctp_ep
, stcb
, NULL
, SCTP_FROM_SCTPUTIL
+ SCTP_LOC_6
);
4978 SCTP_TCB_UNLOCK(stcb
);
4980 /* Update how much we have pending */
4981 stcb
->freed_by_sorcv_sincelast
= dif
;
4984 if (so
&& r_unlocked
&& hold_rlock
) {
4985 SCTP_INP_READ_LOCK(stcb
->sctp_ep
);
4987 SCTP_INP_DECR_REF(stcb
->sctp_ep
);
4989 atomic_add_int(&stcb
->asoc
.refcnt
, -1);
4994 sctp_sorecvmsg(struct socket
*so
,
4997 struct sockaddr
*from
,
5000 struct sctp_sndrcvinfo
*sinfo
,
5004 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5005 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5006 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5007 * On the way out we may send out any combination of:
5008 * MSG_NOTIFICATION MSG_EOR
5011 struct sctp_inpcb
*inp
= NULL
;
5013 int cp_len
= 0, error
= 0;
5014 struct sctp_queued_to_read
*control
= NULL
, *ctl
= NULL
, *nxt
= NULL
;
5015 struct mbuf
*m
= NULL
, *embuf
= NULL
;
5016 struct sctp_tcb
*stcb
= NULL
;
5017 int wakeup_read_socket
= 0;
5018 int freecnt_applied
= 0;
5019 int out_flags
= 0, in_flags
= 0;
5020 int block_allowed
= 1;
5021 uint32_t freed_so_far
= 0;
5022 uint32_t copied_so_far
= 0;
5023 int in_eeor_mode
= 0;
5024 int no_rcv_needed
= 0;
5025 uint32_t rwnd_req
= 0;
5026 int hold_sblock
= 0;
5029 uint32_t held_length
= 0;
5030 int sockbuf_lock
= 0;
5033 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
5037 in_flags
= *msg_flags
;
5038 if (in_flags
& MSG_PEEK
)
5039 SCTP_STAT_INCR(sctps_read_peeks
);
5043 slen
= uio
->uio_resid
;
5045 /* Pull in and set up our int flags */
5046 if (in_flags
& MSG_OOB
) {
5047 /* Out of band's NOT supported */
5048 return (EOPNOTSUPP
);
5050 if ((in_flags
& MSG_PEEK
) && (mp
!= NULL
)) {
5051 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
5054 if ((in_flags
& (MSG_DONTWAIT
5057 SCTP_SO_IS_NBIO(so
)) {
5060 /* setup the endpoint */
5061 inp
= (struct sctp_inpcb
*)so
->so_pcb
;
5063 SCTP_LTRACE_ERR_RET(NULL
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EFAULT
);
5066 rwnd_req
= (SCTP_SB_LIMIT_RCV(so
) >> SCTP_RWND_HIWAT_SHIFT
);
5067 /* Must be at least a MTU's worth */
5068 if (rwnd_req
< SCTP_MIN_RWND
)
5069 rwnd_req
= SCTP_MIN_RWND
;
5070 in_eeor_mode
= sctp_is_feature_on(inp
, SCTP_PCB_FLAGS_EXPLICIT_EOR
);
5071 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_RECV_RWND_LOGGING_ENABLE
) {
5072 sctp_misc_ints(SCTP_SORECV_ENTER
,
5073 rwnd_req
, in_eeor_mode
, so
->so_rcv
.sb_cc
, uio
->uio_resid
);
5075 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_RECV_RWND_LOGGING_ENABLE
) {
5076 sctp_misc_ints(SCTP_SORECV_ENTERPL
,
5077 rwnd_req
, block_allowed
, so
->so_rcv
.sb_cc
, uio
->uio_resid
);
5079 error
= sblock(&so
->so_rcv
, (block_allowed
? SBL_WAIT
: 0));
5082 goto release_unlocked
;
5088 if (hold_sblock
== 0) {
5089 SOCKBUF_LOCK(&so
->so_rcv
);
5092 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) ||
5093 (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_ALLGONE
)) {
5096 if (so
->so_rcv
.sb_state
& SBS_CANTRCVMORE
) {
5098 error
= so
->so_error
;
5099 if ((in_flags
& MSG_PEEK
) == 0)
5103 if (so
->so_rcv
.sb_cc
== 0) {
5104 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, ENOTCONN
);
5111 if ((so
->so_rcv
.sb_cc
<= held_length
) && block_allowed
) {
5112 /* we need to wait for data */
5113 if ((so
->so_rcv
.sb_cc
== 0) &&
5114 ((inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
5115 (inp
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
))) {
5116 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) == 0) {
5118 * For active open side clear flags for
5119 * re-use passive open is blocked by
5122 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_WAS_ABORTED
) {
5124 * You were aborted, passive side
5127 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, ECONNRESET
);
5130 * You get this once if you are
5133 if (!(inp
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
5135 * Remove flag if on the
5138 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_WAS_ABORTED
;
5141 so
->so_state
&= ~(SS_ISCONNECTING
|
5142 SS_ISDISCONNECTING
|
5146 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_WAS_CONNECTED
) == 0) {
5147 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, ENOTCONN
);
5150 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_WAS_CONNECTED
;
5156 error
= sbwait(&so
->so_rcv
);
5161 goto restart_nosblocks
;
5162 } else if (so
->so_rcv
.sb_cc
== 0) {
5164 error
= so
->so_error
;
5165 if ((in_flags
& MSG_PEEK
) == 0)
5168 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_TCPTYPE
) ||
5169 (inp
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
5170 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_CONNECTED
) == 0) {
5172 * For active open side clear flags
5173 * for re-use passive open is
5174 * blocked by connect.
5176 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_WAS_ABORTED
) {
5178 * You were aborted, passive
5179 * side always hits here
5181 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, ECONNRESET
);
5184 * You get this once if you
5185 * are active open side
5187 if (!(inp
->sctp_flags
& SCTP_PCB_FLAGS_IN_TCPPOOL
)) {
5193 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_WAS_ABORTED
;
5196 so
->so_state
&= ~(SS_ISCONNECTING
|
5197 SS_ISDISCONNECTING
|
5201 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_WAS_CONNECTED
) == 0) {
5202 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, ENOTCONN
);
5205 inp
->sctp_flags
&= ~SCTP_PCB_FLAGS_WAS_CONNECTED
;
5211 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EWOULDBLOCK
);
5212 error
= EWOULDBLOCK
;
5216 if (hold_sblock
== 1) {
5217 SOCKBUF_UNLOCK(&so
->so_rcv
);
5220 /* we possibly have data we can read */
5221 /* sa_ignore FREED_MEMORY */
5222 control
= TAILQ_FIRST(&inp
->read_queue
);
5223 if (control
== NULL
) {
5225 * This could be happening since the appender did the
5226 * increment but as not yet did the tailq insert onto the
5229 if (hold_rlock
== 0) {
5230 SCTP_INP_READ_LOCK(inp
);
5233 control
= TAILQ_FIRST(&inp
->read_queue
);
5234 if ((control
== NULL
) && (so
->so_rcv
.sb_cc
!= 0)) {
5236 panic("Huh, its non zero and nothing on control?");
5238 so
->so_rcv
.sb_cc
= 0;
5240 SCTP_INP_READ_UNLOCK(inp
);
5244 if ((control
->length
== 0) &&
5245 (control
->do_not_ref_stcb
)) {
5247 * Clean up code for freeing assoc that left behind a
5248 * pdapi.. maybe a peer in EEOR that just closed after
5249 * sending and never indicated a EOR.
5251 if (hold_rlock
== 0) {
5253 SCTP_INP_READ_LOCK(inp
);
5255 control
->held_length
= 0;
5256 if (control
->data
) {
5257 /* Hmm there is data here .. fix */
5261 m_tmp
= control
->data
;
5263 cnt
+= SCTP_BUF_LEN(m_tmp
);
5264 if (SCTP_BUF_NEXT(m_tmp
) == NULL
) {
5265 control
->tail_mbuf
= m_tmp
;
5266 control
->end_added
= 1;
5268 m_tmp
= SCTP_BUF_NEXT(m_tmp
);
5270 control
->length
= cnt
;
5273 TAILQ_REMOVE(&inp
->read_queue
, control
, next
);
5274 /* Add back any hiddend data */
5275 sctp_free_remote_addr(control
->whoFrom
);
5276 sctp_free_a_readq(stcb
, control
);
5280 SCTP_INP_READ_UNLOCK(inp
);
5284 if (control
->length
== 0) {
5285 if ((sctp_is_feature_on(inp
, SCTP_PCB_FLAGS_FRAG_INTERLEAVE
)) &&
5287 /* find a more suitable one then this */
5288 ctl
= TAILQ_NEXT(control
, next
);
5290 if ((ctl
->stcb
!= control
->stcb
) && (ctl
->length
) &&
5292 (ctl
->spec_flags
& M_NOTIFICATION
) ||
5293 ((ctl
->do_not_ref_stcb
== 0) &&
5294 (ctl
->stcb
->asoc
.strmin
[ctl
->sinfo_stream
].delivery_started
== 0)))
5297 * If we have a different TCB next, and there is data
5298 * present. If we have already taken some (pdapi), OR we can
5299 * ref the tcb and no delivery as started on this stream, we
5300 * take it. Note we allow a notification on a different
5301 * assoc to be delivered..
5305 } else if ((sctp_is_feature_on(inp
, SCTP_PCB_FLAGS_INTERLEAVE_STRMS
)) &&
5307 ((ctl
->some_taken
) ||
5308 ((ctl
->do_not_ref_stcb
== 0) &&
5309 ((ctl
->spec_flags
& M_NOTIFICATION
) == 0) &&
5310 (ctl
->stcb
->asoc
.strmin
[ctl
->sinfo_stream
].delivery_started
== 0)))
5313 * If we have the same tcb, and there is data present, and we
5314 * have the strm interleave feature present. Then if we have
5315 * taken some (pdapi) or we can refer to tht tcb AND we have
5316 * not started a delivery for this stream, we can take it.
5317 * Note we do NOT allow a notificaiton on the same assoc to
5323 ctl
= TAILQ_NEXT(ctl
, next
);
5327 * if we reach here, not suitable replacement is available
5328 * <or> fragment interleave is NOT on. So stuff the sb_cc
5329 * into the our held count, and its time to sleep again.
5331 held_length
= so
->so_rcv
.sb_cc
;
5332 control
->held_length
= so
->so_rcv
.sb_cc
;
5335 /* Clear the held length since there is something to read */
5336 control
->held_length
= 0;
5338 SCTP_INP_READ_UNLOCK(inp
);
5343 * If we reach here, control has a some data for us to read off.
5344 * Note that stcb COULD be NULL.
5346 control
->some_taken
++;
5348 SOCKBUF_UNLOCK(&so
->so_rcv
);
5351 stcb
= control
->stcb
;
5353 if ((control
->do_not_ref_stcb
== 0) &&
5354 (stcb
->asoc
.state
& SCTP_STATE_ABOUT_TO_BE_FREED
)) {
5355 if (freecnt_applied
== 0)
5357 } else if (control
->do_not_ref_stcb
== 0) {
5358 /* you can't free it on me please */
5360 * The lock on the socket buffer protects us so the
5361 * free code will stop. But since we used the
5362 * socketbuf lock and the sender uses the tcb_lock
5363 * to increment, we need to use the atomic add to
5366 if (freecnt_applied
) {
5368 panic("refcnt already incremented");
5370 printf("refcnt already incremented?\n");
5373 atomic_add_int(&stcb
->asoc
.refcnt
, 1);
5374 freecnt_applied
= 1;
5377 * Setup to remember how much we have not yet told
5378 * the peer our rwnd has opened up. Note we grab the
5379 * value from the tcb from last time. Note too that
5380 * sack sending clears this when a sack is sent,
5381 * which is fine. Once we hit the rwnd_req, we then
5382 * will go to the sctp_user_rcvd() that will not
5383 * lock until it KNOWs it MUST send a WUP-SACK.
5385 freed_so_far
= stcb
->freed_by_sorcv_sincelast
;
5386 stcb
->freed_by_sorcv_sincelast
= 0;
5390 ((control
->spec_flags
& M_NOTIFICATION
) == 0) &&
5391 control
->do_not_ref_stcb
== 0) {
5392 stcb
->asoc
.strmin
[control
->sinfo_stream
].delivery_started
= 1;
5394 /* First lets get off the sinfo and sockaddr info */
5395 if ((sinfo
) && filling_sinfo
) {
5396 memcpy(sinfo
, control
, sizeof(struct sctp_nonpad_sndrcvinfo
));
5397 nxt
= TAILQ_NEXT(control
, next
);
5398 if (sctp_is_feature_on(inp
, SCTP_PCB_FLAGS_EXT_RCVINFO
)) {
5399 struct sctp_extrcvinfo
*s_extra
;
5401 s_extra
= (struct sctp_extrcvinfo
*)sinfo
;
5404 s_extra
->sreinfo_next_flags
= SCTP_NEXT_MSG_AVAIL
;
5405 if (nxt
->sinfo_flags
& SCTP_UNORDERED
) {
5406 s_extra
->sreinfo_next_flags
|= SCTP_NEXT_MSG_IS_UNORDERED
;
5408 if (nxt
->spec_flags
& M_NOTIFICATION
) {
5409 s_extra
->sreinfo_next_flags
|= SCTP_NEXT_MSG_IS_NOTIFICATION
;
5411 s_extra
->sreinfo_next_aid
= nxt
->sinfo_assoc_id
;
5412 s_extra
->sreinfo_next_length
= nxt
->length
;
5413 s_extra
->sreinfo_next_ppid
= nxt
->sinfo_ppid
;
5414 s_extra
->sreinfo_next_stream
= nxt
->sinfo_stream
;
5415 if (nxt
->tail_mbuf
!= NULL
) {
5416 if (nxt
->end_added
) {
5417 s_extra
->sreinfo_next_flags
|= SCTP_NEXT_MSG_ISCOMPLETE
;
5422 * we explicitly 0 this, since the memcpy
5423 * got some other things beyond the older
5424 * sinfo_ that is on the control's structure
5428 s_extra
->sreinfo_next_flags
= SCTP_NO_NEXT_MSG
;
5429 s_extra
->sreinfo_next_aid
= 0;
5430 s_extra
->sreinfo_next_length
= 0;
5431 s_extra
->sreinfo_next_ppid
= 0;
5432 s_extra
->sreinfo_next_stream
= 0;
5436 * update off the real current cum-ack, if we have an stcb.
5438 if ((control
->do_not_ref_stcb
== 0) && stcb
)
5439 sinfo
->sinfo_cumtsn
= stcb
->asoc
.cumulative_tsn
;
5441 * mask off the high bits, we keep the actual chunk bits in
5444 sinfo
->sinfo_flags
&= 0x00ff;
5445 if ((control
->sinfo_flags
>> 8) & SCTP_DATA_UNORDERED
) {
5446 sinfo
->sinfo_flags
|= SCTP_UNORDERED
;
5449 #ifdef SCTP_ASOCLOG_OF_TSNS
5451 int index
, newindex
;
5452 struct sctp_pcbtsn_rlog
*entry
;
5455 index
= inp
->readlog_index
;
5456 newindex
= index
+ 1;
5457 if (newindex
>= SCTP_READ_LOG_SIZE
) {
5460 } while (atomic_cmpset_int(&inp
->readlog_index
, index
, newindex
) == 0);
5461 entry
= &inp
->readlog
[index
];
5462 entry
->vtag
= control
->sinfo_assoc_id
;
5463 entry
->strm
= control
->sinfo_stream
;
5464 entry
->seq
= control
->sinfo_ssn
;
5465 entry
->sz
= control
->length
;
5466 entry
->flgs
= control
->sinfo_flags
;
5469 if (fromlen
&& from
) {
5470 struct sockaddr
*to
;
5473 cp_len
= min((size_t)fromlen
, (size_t)control
->whoFrom
->ro
._l_addr
.sin
.sin_len
);
5474 memcpy(from
, &control
->whoFrom
->ro
._l_addr
, cp_len
);
5475 ((struct sockaddr_in
*)from
)->sin_port
= control
->port_from
;
5477 /* No AF_INET use AF_INET6 */
5478 cp_len
= min((size_t)fromlen
, (size_t)control
->whoFrom
->ro
._l_addr
.sin6
.sin6_len
);
5479 memcpy(from
, &control
->whoFrom
->ro
._l_addr
, cp_len
);
5480 ((struct sockaddr_in6
*)from
)->sin6_port
= control
->port_from
;
5484 #if defined(INET) && defined(INET6)
5485 if ((sctp_is_feature_on(inp
, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4
)) &&
5486 (to
->sa_family
== AF_INET
) &&
5487 ((size_t)fromlen
>= sizeof(struct sockaddr_in6
))) {
5488 struct sockaddr_in
*sin
;
5489 struct sockaddr_in6 sin6
;
5491 sin
= (struct sockaddr_in
*)to
;
5492 bzero(&sin6
, sizeof(sin6
));
5493 sin6
.sin6_family
= AF_INET6
;
5494 sin6
.sin6_len
= sizeof(struct sockaddr_in6
);
5495 sin6
.sin6_addr
.s6_addr32
[2] = htonl(0xffff);
5496 bcopy(&sin
->sin_addr
,
5497 &sin6
.sin6_addr
.s6_addr32
[3],
5498 sizeof(sin6
.sin6_addr
.s6_addr32
[3]));
5499 sin6
.sin6_port
= sin
->sin_port
;
5500 memcpy(from
, (caddr_t
)&sin6
, sizeof(sin6
));
5505 struct sockaddr_in6 lsa6
, *to6
;
5507 to6
= (struct sockaddr_in6
*)to
;
5508 sctp_recover_scope_mac(to6
, (&lsa6
));
5512 /* now copy out what data we can */
5514 /* copy out each mbuf in the chain up to length */
5518 /* Move out all we can */
5519 cp_len
= (int)uio
->uio_resid
;
5520 my_len
= (int)SCTP_BUF_LEN(m
);
5521 if (cp_len
> my_len
) {
5522 /* not enough in this buf */
5526 SCTP_INP_READ_UNLOCK(inp
);
5530 error
= uiomove(mtod(m
, char *), cp_len
, uio
);
5532 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
) {
5535 if ((control
->do_not_ref_stcb
== 0) && stcb
&&
5536 stcb
->asoc
.state
& SCTP_STATE_ABOUT_TO_BE_FREED
) {
5540 /* error we are out of here */
5543 if ((SCTP_BUF_NEXT(m
) == NULL
) &&
5544 (cp_len
>= SCTP_BUF_LEN(m
)) &&
5545 ((control
->end_added
== 0) ||
5546 (control
->end_added
&&
5547 (TAILQ_NEXT(control
, next
) == NULL
)))
5549 SCTP_INP_READ_LOCK(inp
);
5552 if (cp_len
== SCTP_BUF_LEN(m
)) {
5553 if ((SCTP_BUF_NEXT(m
) == NULL
) &&
5554 (control
->end_added
)) {
5555 out_flags
|= MSG_EOR
;
5556 if ((control
->do_not_ref_stcb
== 0) && ((control
->spec_flags
& M_NOTIFICATION
) == 0))
5557 control
->stcb
->asoc
.strmin
[control
->sinfo_stream
].delivery_started
= 0;
5559 if (control
->spec_flags
& M_NOTIFICATION
) {
5560 out_flags
|= MSG_NOTIFICATION
;
5562 /* we ate up the mbuf */
5563 if (in_flags
& MSG_PEEK
) {
5565 m
= SCTP_BUF_NEXT(m
);
5566 copied_so_far
+= cp_len
;
5568 /* dispose of the mbuf */
5569 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
5570 sctp_sblog(&so
->so_rcv
,
5571 control
->do_not_ref_stcb
? NULL
: stcb
, SCTP_LOG_SBFREE
, SCTP_BUF_LEN(m
));
5573 sctp_sbfree(control
, stcb
, &so
->so_rcv
, m
);
5574 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
5575 sctp_sblog(&so
->so_rcv
,
5576 control
->do_not_ref_stcb
? NULL
: stcb
, SCTP_LOG_SBRESULT
, 0);
5579 copied_so_far
+= cp_len
;
5580 freed_so_far
+= cp_len
;
5581 freed_so_far
+= MSIZE
;
5582 atomic_subtract_int(&control
->length
, cp_len
);
5583 control
->data
= sctp_m_free(m
);
5586 * been through it all, must hold sb
5587 * lock ok to null tail
5589 if (control
->data
== NULL
) {
5591 if ((control
->end_added
== 0) ||
5592 (TAILQ_NEXT(control
, next
) == NULL
)) {
5600 if (mtx_owned(&inp
->inp_rdata_mtx
) == 0) {
5601 panic("Hmm we don't own the lock?");
5605 control
->tail_mbuf
= NULL
;
5607 if ((control
->end_added
) && ((out_flags
& MSG_EOR
) == 0)) {
5608 panic("end_added, nothing left and no MSG_EOR");
5614 /* Do we need to trim the mbuf? */
5615 if (control
->spec_flags
& M_NOTIFICATION
) {
5616 out_flags
|= MSG_NOTIFICATION
;
5618 if ((in_flags
& MSG_PEEK
) == 0) {
5619 SCTP_BUF_RESV_UF(m
, cp_len
);
5620 SCTP_BUF_LEN(m
) -= cp_len
;
5621 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
5622 sctp_sblog(&so
->so_rcv
, control
->do_not_ref_stcb
? NULL
: stcb
, SCTP_LOG_SBFREE
, cp_len
);
5624 atomic_subtract_int(&so
->so_rcv
.sb_cc
, cp_len
);
5625 if ((control
->do_not_ref_stcb
== 0) &&
5627 atomic_subtract_int(&stcb
->asoc
.sb_cc
, cp_len
);
5629 copied_so_far
+= cp_len
;
5631 freed_so_far
+= cp_len
;
5632 freed_so_far
+= MSIZE
;
5633 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
5634 sctp_sblog(&so
->so_rcv
, control
->do_not_ref_stcb
? NULL
: stcb
,
5635 SCTP_LOG_SBRESULT
, 0);
5637 atomic_subtract_int(&control
->length
, cp_len
);
5639 copied_so_far
+= cp_len
;
5642 if ((out_flags
& MSG_EOR
) || (uio
->uio_resid
== 0)) {
5645 if (((stcb
) && (in_flags
& MSG_PEEK
) == 0) &&
5646 (control
->do_not_ref_stcb
== 0) &&
5647 (freed_so_far
>= rwnd_req
)) {
5648 sctp_user_rcvd(stcb
, &freed_so_far
, hold_rlock
, rwnd_req
);
5650 } /* end while(m) */
5652 * At this point we have looked at it all and we either have
5653 * a MSG_EOR/or read all the user wants... <OR>
5654 * control->length == 0.
5656 if ((out_flags
& MSG_EOR
) && ((in_flags
& MSG_PEEK
) == 0)) {
5657 /* we are done with this control */
5658 if (control
->length
== 0) {
5659 if (control
->data
) {
5661 panic("control->data not null at read eor?");
5663 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5664 sctp_m_freem(control
->data
);
5665 control
->data
= NULL
;
5669 if (TAILQ_NEXT(control
, next
) == NULL
) {
5671 * If we don't have a next we need a
5672 * lock, if there is a next
5673 * interrupt is filling ahead of us
5674 * and we don't need a lock to
5675 * remove this guy (which is the
5676 * head of the queue).
5678 if (hold_rlock
== 0) {
5679 SCTP_INP_READ_LOCK(inp
);
5683 TAILQ_REMOVE(&inp
->read_queue
, control
, next
);
5684 /* Add back any hiddend data */
5685 if (control
->held_length
) {
5687 control
->held_length
= 0;
5688 wakeup_read_socket
= 1;
5690 if (control
->aux_data
) {
5691 sctp_m_free(control
->aux_data
);
5692 control
->aux_data
= NULL
;
5694 no_rcv_needed
= control
->do_not_ref_stcb
;
5695 sctp_free_remote_addr(control
->whoFrom
);
5696 control
->data
= NULL
;
5697 sctp_free_a_readq(stcb
, control
);
5699 if ((freed_so_far
>= rwnd_req
) &&
5700 (no_rcv_needed
== 0))
5701 sctp_user_rcvd(stcb
, &freed_so_far
, hold_rlock
, rwnd_req
);
5705 * The user did not read all of this
5706 * message, turn off the returned MSG_EOR
5707 * since we are leaving more behind on the
5711 if (control
->end_added
&&
5712 (control
->data
== NULL
) &&
5713 (control
->tail_mbuf
== NULL
)) {
5714 panic("Gak, control->length is corrupt?");
5717 no_rcv_needed
= control
->do_not_ref_stcb
;
5718 out_flags
&= ~MSG_EOR
;
5721 if (out_flags
& MSG_EOR
) {
5724 if ((uio
->uio_resid
== 0) ||
5725 ((in_eeor_mode
) && (copied_so_far
>= max(so
->so_rcv
.sb_lowat
, 1)))
5730 * If I hit here the receiver wants more and this message is
5731 * NOT done (pd-api). So two questions. Can we block? if not
5732 * we are done. Did the user NOT set MSG_WAITALL?
5734 if (block_allowed
== 0) {
5738 * We need to wait for more data a few things: - We don't
5739 * sbunlock() so we don't get someone else reading. - We
5740 * must be sure to account for the case where what is added
5741 * is NOT to our control when we wakeup.
5745 * Do we need to tell the transport a rwnd update might be
5746 * needed before we go to sleep?
5748 if (((stcb
) && (in_flags
& MSG_PEEK
) == 0) &&
5749 ((freed_so_far
>= rwnd_req
) &&
5750 (control
->do_not_ref_stcb
== 0) &&
5751 (no_rcv_needed
== 0))) {
5752 sctp_user_rcvd(stcb
, &freed_so_far
, hold_rlock
, rwnd_req
);
5755 if (so
->so_rcv
.sb_state
& SBS_CANTRCVMORE
) {
5758 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_SOCKET_GONE
)
5761 if (hold_rlock
== 1) {
5762 SCTP_INP_READ_UNLOCK(inp
);
5765 if (hold_sblock
== 0) {
5766 SOCKBUF_LOCK(&so
->so_rcv
);
5769 if ((copied_so_far
) && (control
->length
== 0) &&
5770 (sctp_is_feature_on(inp
, SCTP_PCB_FLAGS_FRAG_INTERLEAVE
))
5774 if (so
->so_rcv
.sb_cc
<= control
->held_length
) {
5775 error
= sbwait(&so
->so_rcv
);
5779 control
->held_length
= 0;
5782 SOCKBUF_UNLOCK(&so
->so_rcv
);
5785 if (control
->length
== 0) {
5786 /* still nothing here */
5787 if (control
->end_added
== 1) {
5788 /* he aborted, or is done i.e.did a shutdown */
5789 out_flags
|= MSG_EOR
;
5790 if (control
->pdapi_aborted
) {
5791 if ((control
->do_not_ref_stcb
== 0) && ((control
->spec_flags
& M_NOTIFICATION
) == 0))
5792 control
->stcb
->asoc
.strmin
[control
->sinfo_stream
].delivery_started
= 0;
5794 out_flags
|= MSG_TRUNC
;
5796 if ((control
->do_not_ref_stcb
== 0) && ((control
->spec_flags
& M_NOTIFICATION
) == 0))
5797 control
->stcb
->asoc
.strmin
[control
->sinfo_stream
].delivery_started
= 0;
5799 goto done_with_control
;
5801 if (so
->so_rcv
.sb_cc
> held_length
) {
5802 control
->held_length
= so
->so_rcv
.sb_cc
;
5805 goto wait_some_more
;
5806 } else if (control
->data
== NULL
) {
5808 * we must re-sync since data is probably being
5811 SCTP_INP_READ_LOCK(inp
);
5812 if ((control
->length
> 0) && (control
->data
== NULL
)) {
5814 * big trouble.. we have the lock and its
5818 panic("Impossible data==NULL length !=0");
5820 out_flags
|= MSG_EOR
;
5821 out_flags
|= MSG_TRUNC
;
5822 control
->length
= 0;
5823 SCTP_INP_READ_UNLOCK(inp
);
5824 goto done_with_control
;
5826 SCTP_INP_READ_UNLOCK(inp
);
5827 /* We will fall around to get more data */
5832 * Give caller back the mbuf chain,
5833 * store in uio_resid the length
5835 wakeup_read_socket
= 0;
5836 if ((control
->end_added
== 0) ||
5837 (TAILQ_NEXT(control
, next
) == NULL
)) {
5838 /* Need to get rlock */
5839 if (hold_rlock
== 0) {
5840 SCTP_INP_READ_LOCK(inp
);
5844 if (control
->end_added
) {
5845 out_flags
|= MSG_EOR
;
5846 if ((control
->do_not_ref_stcb
== 0) && ((control
->spec_flags
& M_NOTIFICATION
) == 0))
5847 control
->stcb
->asoc
.strmin
[control
->sinfo_stream
].delivery_started
= 0;
5849 if (control
->spec_flags
& M_NOTIFICATION
) {
5850 out_flags
|= MSG_NOTIFICATION
;
5852 uio
->uio_resid
= control
->length
;
5853 *mp
= control
->data
;
5856 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
5857 sctp_sblog(&so
->so_rcv
,
5858 control
->do_not_ref_stcb
? NULL
: stcb
, SCTP_LOG_SBFREE
, SCTP_BUF_LEN(m
));
5860 sctp_sbfree(control
, stcb
, &so
->so_rcv
, m
);
5861 freed_so_far
+= SCTP_BUF_LEN(m
);
5862 freed_so_far
+= MSIZE
;
5863 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_SB_LOGGING_ENABLE
) {
5864 sctp_sblog(&so
->so_rcv
,
5865 control
->do_not_ref_stcb
? NULL
: stcb
, SCTP_LOG_SBRESULT
, 0);
5867 m
= SCTP_BUF_NEXT(m
);
5869 control
->data
= control
->tail_mbuf
= NULL
;
5870 control
->length
= 0;
5871 if (out_flags
& MSG_EOR
) {
5872 /* Done with this control */
5873 goto done_with_control
;
5877 if (hold_rlock
== 1) {
5878 SCTP_INP_READ_UNLOCK(inp
);
5881 if (hold_sblock
== 1) {
5882 SOCKBUF_UNLOCK(&so
->so_rcv
);
5885 sbunlock(&so
->so_rcv
);
5890 SOCKBUF_UNLOCK(&so
->so_rcv
);
5893 if ((stcb
) && (in_flags
& MSG_PEEK
) == 0) {
5894 if ((freed_so_far
>= rwnd_req
) &&
5895 (control
&& (control
->do_not_ref_stcb
== 0)) &&
5896 (no_rcv_needed
== 0))
5897 sctp_user_rcvd(stcb
, &freed_so_far
, hold_rlock
, rwnd_req
);
5900 *msg_flags
= out_flags
;
5902 if (((out_flags
& MSG_EOR
) == 0) &&
5903 ((in_flags
& MSG_PEEK
) == 0) &&
5905 (sctp_is_feature_on(inp
, SCTP_PCB_FLAGS_EXT_RCVINFO
))) {
5906 struct sctp_extrcvinfo
*s_extra
;
5908 s_extra
= (struct sctp_extrcvinfo
*)sinfo
;
5909 s_extra
->sreinfo_next_flags
= SCTP_NO_NEXT_MSG
;
5911 if (hold_rlock
== 1) {
5912 SCTP_INP_READ_UNLOCK(inp
);
5916 SOCKBUF_UNLOCK(&so
->so_rcv
);
5920 sbunlock(&so
->so_rcv
);
5922 if (freecnt_applied
) {
5924 * The lock on the socket buffer protects us so the free
5925 * code will stop. But since we used the socketbuf lock and
5926 * the sender uses the tcb_lock to increment, we need to use
5927 * the atomic add to the refcnt.
5931 panic("stcb for refcnt has gone NULL?");
5937 atomic_add_int(&stcb
->asoc
.refcnt
, -1);
5938 freecnt_applied
= 0;
5939 /* Save the value back for next time */
5940 stcb
->freed_by_sorcv_sincelast
= freed_so_far
;
5942 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_RECV_RWND_LOGGING_ENABLE
) {
5944 sctp_misc_ints(SCTP_SORECV_DONE
,
5946 ((uio
) ? (slen
- uio
->uio_resid
) : slen
),
5950 sctp_misc_ints(SCTP_SORECV_DONE
,
5952 ((uio
) ? (slen
- uio
->uio_resid
) : slen
),
5958 if (wakeup_read_socket
) {
5959 sctp_sorwakeup(inp
, so
);
5965 #ifdef SCTP_MBUF_LOGGING
5967 sctp_m_free(struct mbuf
*m
)
5969 if (SCTP_BASE_SYSCTL(sctp_logging_level
) & SCTP_MBUF_LOGGING_ENABLE
) {
5970 if (SCTP_BUF_IS_EXTENDED(m
)) {
5971 sctp_log_mb(m
, SCTP_MBUF_IFREE
);
5978 sctp_m_freem(struct mbuf
*mb
)
5981 mb
= sctp_m_free(mb
);
5987 sctp_dynamic_set_primary(struct sockaddr
*sa
, uint32_t vrf_id
)
5990 * Given a local address. For all associations that holds the
5991 * address, request a peer-set-primary.
5993 struct sctp_ifa
*ifa
;
5994 struct sctp_laddr
*wi
;
5996 ifa
= sctp_find_ifa_by_addr(sa
, vrf_id
, 0);
5998 SCTP_LTRACE_ERR_RET(NULL
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EADDRNOTAVAIL
);
5999 return (EADDRNOTAVAIL
);
6002 * Now that we have the ifa we must awaken the iterator with this
6005 wi
= SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr
), struct sctp_laddr
);
6007 SCTP_LTRACE_ERR_RET(NULL
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, ENOMEM
);
6010 /* Now incr the count and int wi structure */
6011 SCTP_INCR_LADDR_COUNT();
6012 bzero(wi
, sizeof(*wi
));
6013 (void)SCTP_GETTIME_TIMEVAL(&wi
->start_time
);
6015 wi
->action
= SCTP_SET_PRIM_ADDR
;
6016 atomic_add_int(&ifa
->refcount
, 1);
6018 /* Now add it to the work queue */
6019 SCTP_IPI_ITERATOR_WQ_LOCK();
6021 * Should this really be a tailq? As it is we will process the
6024 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq
), wi
, sctp_nxt_addr
);
6025 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ
,
6026 (struct sctp_inpcb
*)NULL
,
6027 (struct sctp_tcb
*)NULL
,
6028 (struct sctp_nets
*)NULL
);
6029 SCTP_IPI_ITERATOR_WQ_UNLOCK();
6035 sctp_soreceive(struct socket
*so
,
6036 struct sockaddr
**psa
,
6039 struct mbuf
**controlp
,
6043 uint8_t sockbuf
[256];
6044 struct sockaddr
*from
;
6045 struct sctp_extrcvinfo sinfo
;
6046 int filling_sinfo
= 1;
6047 struct sctp_inpcb
*inp
;
6049 inp
= (struct sctp_inpcb
*)so
->so_pcb
;
6050 /* pickup the assoc we are reading from */
6052 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6055 if ((sctp_is_feature_off(inp
,
6056 SCTP_PCB_FLAGS_RECVDATAIOEVNT
)) ||
6057 (controlp
== NULL
)) {
6058 /* user does not want the sndrcv ctl */
6062 from
= (struct sockaddr
*)sockbuf
;
6063 fromlen
= sizeof(sockbuf
);
6070 error
= sctp_sorecvmsg(so
, uio
, mp0
, from
, fromlen
, flagsp
,
6071 (struct sctp_sndrcvinfo
*)&sinfo
, filling_sinfo
);
6072 if ((controlp
) && (filling_sinfo
)) {
6073 /* copy back the sinfo in a CMSG format */
6075 *controlp
= sctp_build_ctl_nchunk(inp
,
6076 (struct sctp_sndrcvinfo
*)&sinfo
);
6081 /* copy back the address info */
6082 if (from
&& from
->sa_len
) {
6083 *psa
= sodupsockaddr(from
, M_NOWAIT
);
6093 sctp_l_soreceive(struct socket
*so
,
6094 struct sockaddr
**name
,
6101 uint8_t sockbuf
[256];
6102 struct sockaddr
*from
;
6103 struct sctp_extrcvinfo sinfo
;
6104 int filling_sinfo
= 1;
6105 struct sctp_inpcb
*inp
;
6107 inp
= (struct sctp_inpcb
*)so
->so_pcb
;
6108 /* pickup the assoc we are reading from */
6110 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6113 if ((sctp_is_feature_off(inp
,
6114 SCTP_PCB_FLAGS_RECVDATAIOEVNT
)) ||
6115 (controlp
== NULL
)) {
6116 /* user does not want the sndrcv ctl */
6120 from
= (struct sockaddr
*)sockbuf
;
6121 fromlen
= sizeof(sockbuf
);
6128 error
= sctp_sorecvmsg(so
, uio
,
6129 (struct mbuf
**)NULL
,
6130 from
, fromlen
, flag
,
6131 (struct sctp_sndrcvinfo
*)&sinfo
,
6133 if ((controlp
) && (filling_sinfo
)) {
6135 * copy back the sinfo in a CMSG format note that the caller
6136 * has reponsibility for freeing the memory.
6139 *controlp
= sctp_build_ctl_cchunk(inp
,
6141 (struct sctp_sndrcvinfo
*)&sinfo
);
6144 /* copy back the address info */
6145 if (from
&& from
->sa_len
) {
6146 *name
= sodupsockaddr(from
, M_WAIT
);
6161 sctp_connectx_helper_add(struct sctp_tcb
*stcb
, struct sockaddr
*addr
,
6162 int totaddr
, int *error
)
6166 struct sctp_inpcb
*inp
;
6167 struct sockaddr
*sa
;
6171 inp
= stcb
->sctp_ep
;
6173 for (i
= 0; i
< totaddr
; i
++) {
6174 if (sa
->sa_family
== AF_INET
) {
6175 incr
= sizeof(struct sockaddr_in
);
6176 if (sctp_add_remote_addr(stcb
, sa
, SCTP_DONOT_SETSCOPE
, SCTP_ADDR_IS_CONFIRMED
)) {
6177 /* assoc gone no un-lock */
6178 SCTP_LTRACE_ERR_RET(NULL
, stcb
, NULL
, SCTP_FROM_SCTPUTIL
, ENOBUFS
);
6179 (void)sctp_free_assoc(inp
, stcb
, SCTP_NORMAL_PROC
, SCTP_FROM_SCTP_USRREQ
+ SCTP_LOC_7
);
6184 } else if (sa
->sa_family
== AF_INET6
) {
6185 incr
= sizeof(struct sockaddr_in6
);
6186 if (sctp_add_remote_addr(stcb
, sa
, SCTP_DONOT_SETSCOPE
, SCTP_ADDR_IS_CONFIRMED
)) {
6187 /* assoc gone no un-lock */
6188 SCTP_LTRACE_ERR_RET(NULL
, stcb
, NULL
, SCTP_FROM_SCTPUTIL
, ENOBUFS
);
6189 (void)sctp_free_assoc(inp
, stcb
, SCTP_NORMAL_PROC
, SCTP_FROM_SCTP_USRREQ
+ SCTP_LOC_8
);
6195 sa
= (struct sockaddr
*)((caddr_t
)sa
+ incr
);
6202 sctp_connectx_helper_find(struct sctp_inpcb
*inp
, struct sockaddr
*addr
,
6203 int *totaddr
, int *num_v4
, int *num_v6
, int *error
,
6204 int limit
, int *bad_addr
)
6206 struct sockaddr
*sa
;
6207 struct sctp_tcb
*stcb
= NULL
;
6212 *error
= *num_v6
= *num_v4
= 0;
6213 /* account and validate addresses */
6214 for (i
= 0; i
< (size_t)*totaddr
; i
++) {
6215 if (sa
->sa_family
== AF_INET
) {
6217 incr
= sizeof(struct sockaddr_in
);
6218 if (sa
->sa_len
!= incr
) {
6219 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6224 } else if (sa
->sa_family
== AF_INET6
) {
6225 struct sockaddr_in6
*sin6
;
6227 sin6
= (struct sockaddr_in6
*)sa
;
6228 if (IN6_IS_ADDR_V4MAPPED(&sin6
->sin6_addr
)) {
6229 /* Must be non-mapped for connectx */
6230 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6236 incr
= sizeof(struct sockaddr_in6
);
6237 if (sa
->sa_len
!= incr
) {
6238 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6248 SCTP_INP_INCR_REF(inp
);
6249 stcb
= sctp_findassociation_ep_addr(&inp
, sa
, NULL
, NULL
, NULL
);
6251 /* Already have or am bring up an association */
6254 SCTP_INP_DECR_REF(inp
);
6256 if ((at
+ incr
) > (size_t)limit
) {
6260 sa
= (struct sockaddr
*)((caddr_t
)sa
+ incr
);
6262 return ((struct sctp_tcb
*)NULL
);
6266 * sctp_bindx(ADD) for one address.
6267 * assumes all arguments are valid/checked by caller.
6270 sctp_bindx_add_address(struct socket
*so
, struct sctp_inpcb
*inp
,
6271 struct sockaddr
*sa
, sctp_assoc_t assoc_id
,
6272 uint32_t vrf_id
, int *error
, void *p
)
6274 struct sockaddr
*addr_touse
;
6277 struct sockaddr_in sin
;
6281 /* see if we're bound all already! */
6282 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
6283 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6288 #if defined(INET6) && !defined(__Userspace__) /* TODO port in6_sin6_2_sin */
6289 if (sa
->sa_family
== AF_INET6
) {
6290 struct sockaddr_in6
*sin6
;
6292 if (sa
->sa_len
!= sizeof(struct sockaddr_in6
)) {
6293 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6297 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) == 0) {
6298 /* can only bind v6 on PF_INET6 sockets */
6299 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6303 sin6
= (struct sockaddr_in6
*)addr_touse
;
6304 if (IN6_IS_ADDR_V4MAPPED(&sin6
->sin6_addr
)) {
6305 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) &&
6306 SCTP_IPV6_V6ONLY(inp
)) {
6307 /* can't bind v4-mapped on PF_INET sockets */
6308 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6312 in6_sin6_2_sin(&sin
, sin6
);
6313 addr_touse
= (struct sockaddr
*)&sin
;
6317 if (sa
->sa_family
== AF_INET
) {
6318 if (sa
->sa_len
!= sizeof(struct sockaddr_in
)) {
6319 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6323 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) &&
6324 SCTP_IPV6_V6ONLY(inp
)) {
6325 /* can't bind v4 on PF_INET sockets */
6326 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6331 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_UNBOUND
) {
6333 /* Can't get proc for Net/Open BSD */
6334 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6338 *error
= sctp_inpcb_bind(so
, addr_touse
, NULL
, p
);
6342 * No locks required here since bind and mgmt_ep_sa all do their own
6343 * locking. If we do something for the FIX: below we may need to
6344 * lock in that case.
6346 if (assoc_id
== 0) {
6347 /* add the address */
6348 struct sctp_inpcb
*lep
;
6349 struct sockaddr_in
*lsin
= (struct sockaddr_in
*)addr_touse
;
6351 /* validate the incoming port */
6352 if ((lsin
->sin_port
!= 0) &&
6353 (lsin
->sin_port
!= inp
->sctp_lport
)) {
6354 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6358 /* user specified 0 port, set it to existing port */
6359 lsin
->sin_port
= inp
->sctp_lport
;
6362 lep
= sctp_pcb_findep(addr_touse
, 1, 0, vrf_id
);
6365 * We must decrement the refcount since we have the
6366 * ep already and are binding. No remove going on
6369 SCTP_INP_DECR_REF(lep
);
6372 /* already bound to it.. ok */
6374 } else if (lep
== NULL
) {
6375 ((struct sockaddr_in
*)addr_touse
)->sin_port
= 0;
6376 *error
= sctp_addr_mgmt_ep_sa(inp
, addr_touse
,
6377 SCTP_ADD_IP_ADDRESS
,
6380 *error
= EADDRINUSE
;
6386 * FIX: decide whether we allow assoc based bindx
6392 * sctp_bindx(DELETE) for one address.
6393 * assumes all arguments are valid/checked by caller.
6396 sctp_bindx_delete_address(struct socket
*so
, struct sctp_inpcb
*inp
,
6397 struct sockaddr
*sa
, sctp_assoc_t assoc_id
,
6398 uint32_t vrf_id
, int *error
)
6400 struct sockaddr
*addr_touse
;
6403 struct sockaddr_in sin
;
6407 /* see if we're bound all already! */
6408 if (inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
6409 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6414 #if defined(INET6) && !defined(__Userspace__) /* TODO port in6_sin6_2_sin */
6415 if (sa
->sa_family
== AF_INET6
) {
6416 struct sockaddr_in6
*sin6
;
6418 if (sa
->sa_len
!= sizeof(struct sockaddr_in6
)) {
6419 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6423 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) == 0) {
6424 /* can only bind v6 on PF_INET6 sockets */
6425 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6429 sin6
= (struct sockaddr_in6
*)addr_touse
;
6430 if (IN6_IS_ADDR_V4MAPPED(&sin6
->sin6_addr
)) {
6431 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) &&
6432 SCTP_IPV6_V6ONLY(inp
)) {
6433 /* can't bind mapped-v4 on PF_INET sockets */
6434 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6438 in6_sin6_2_sin(&sin
, sin6
);
6439 addr_touse
= (struct sockaddr
*)&sin
;
6443 if (sa
->sa_family
== AF_INET
) {
6444 if (sa
->sa_len
!= sizeof(struct sockaddr_in
)) {
6445 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6449 if ((inp
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) &&
6450 SCTP_IPV6_V6ONLY(inp
)) {
6451 /* can't bind v4 on PF_INET sockets */
6452 SCTP_LTRACE_ERR_RET(inp
, NULL
, NULL
, SCTP_FROM_SCTPUTIL
, EINVAL
);
6458 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6459 * below is ever changed we may need to lock before calling
6460 * association level binding.
6462 if (assoc_id
== 0) {
6463 /* delete the address */
6464 *error
= sctp_addr_mgmt_ep_sa(inp
, addr_touse
,
6465 SCTP_DEL_IP_ADDRESS
,
6469 * FIX: decide whether we allow assoc based bindx
6475 * returns the valid local address count for an assoc, taking into account
6479 sctp_local_addr_count(struct sctp_tcb
*stcb
)
6481 int loopback_scope
, ipv4_local_scope
, local_scope
, site_scope
;
6482 int ipv4_addr_legal
, ipv6_addr_legal
;
6483 struct sctp_vrf
*vrf
;
6484 struct sctp_ifn
*sctp_ifn
;
6485 struct sctp_ifa
*sctp_ifa
;
6488 /* Turn on all the appropriate scopes */
6489 loopback_scope
= stcb
->asoc
.loopback_scope
;
6490 ipv4_local_scope
= stcb
->asoc
.ipv4_local_scope
;
6491 local_scope
= stcb
->asoc
.local_scope
;
6492 site_scope
= stcb
->asoc
.site_scope
;
6493 ipv4_addr_legal
= ipv6_addr_legal
= 0;
6494 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUND_V6
) {
6495 ipv6_addr_legal
= 1;
6496 if (SCTP_IPV6_V6ONLY(stcb
->sctp_ep
) == 0) {
6497 ipv4_addr_legal
= 1;
6500 ipv4_addr_legal
= 1;
6503 SCTP_IPI_ADDR_RLOCK();
6504 vrf
= sctp_find_vrf(stcb
->asoc
.vrf_id
);
6506 /* no vrf, no addresses */
6507 SCTP_IPI_ADDR_RUNLOCK();
6510 if (stcb
->sctp_ep
->sctp_flags
& SCTP_PCB_FLAGS_BOUNDALL
) {
6512 * bound all case: go through all ifns on the vrf
6514 LIST_FOREACH(sctp_ifn
, &vrf
->ifnlist
, next_ifn
) {
6515 if ((loopback_scope
== 0) &&
6516 SCTP_IFN_IS_IFT_LOOP(sctp_ifn
)) {
6519 LIST_FOREACH(sctp_ifa
, &sctp_ifn
->ifalist
, next_ifa
) {
6520 if (sctp_is_addr_restricted(stcb
, sctp_ifa
))
6522 switch (sctp_ifa
->address
.sa
.sa_family
) {
6524 if (ipv4_addr_legal
) {
6525 struct sockaddr_in
*sin
;
6527 sin
= (struct sockaddr_in
*)&sctp_ifa
->address
.sa
;
6528 if (sin
->sin_addr
.s_addr
== 0) {
6535 if ((ipv4_local_scope
== 0) &&
6536 (IN4_ISPRIVATE_ADDRESS(&sin
->sin_addr
))) {
6539 /* count this one */
6547 if (ipv6_addr_legal
) {
6548 struct sockaddr_in6
*sin6
;
6550 sin6
= (struct sockaddr_in6
*)&sctp_ifa
->address
.sa
;
6551 if (IN6_IS_ADDR_UNSPECIFIED(&sin6
->sin6_addr
)) {
6554 if (IN6_IS_ADDR_LINKLOCAL(&sin6
->sin6_addr
)) {
6555 if (local_scope
== 0)
6557 if (sin6
->sin6_scope_id
== 0) {
6558 if (sa6_recoverscope(sin6
) != 0)
6576 if ((site_scope
== 0) &&
6577 (IN6_IS_ADDR_SITELOCAL(&sin6
->sin6_addr
))) {
6580 /* count this one */
6595 struct sctp_laddr
*laddr
;
6597 LIST_FOREACH(laddr
, &stcb
->sctp_ep
->sctp_addr_list
,
6599 if (sctp_is_addr_restricted(stcb
, laddr
->ifa
)) {
6602 /* count this one */
6606 SCTP_IPI_ADDR_RUNLOCK();
6610 #if defined(SCTP_LOCAL_TRACE_BUF)
6613 sctp_log_trace(uint32_t subsys
, const char *str SCTP_UNUSED
, uint32_t a
, uint32_t b
, uint32_t c
, uint32_t d
, uint32_t e
, uint32_t f
)
6615 uint32_t saveindex
, newindex
;
6618 saveindex
= SCTP_BASE_SYSCTL(sctp_log
).index
;
6619 if (saveindex
>= SCTP_MAX_LOGGING_SIZE
) {
6622 newindex
= saveindex
+ 1;
6624 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log
).index
, saveindex
, newindex
) == 0);
6625 if (saveindex
>= SCTP_MAX_LOGGING_SIZE
) {
6628 SCTP_BASE_SYSCTL(sctp_log
).entry
[saveindex
].timestamp
= SCTP_GET_CYCLECOUNT
;
6629 SCTP_BASE_SYSCTL(sctp_log
).entry
[saveindex
].subsys
= subsys
;
6630 SCTP_BASE_SYSCTL(sctp_log
).entry
[saveindex
].params
[0] = a
;
6631 SCTP_BASE_SYSCTL(sctp_log
).entry
[saveindex
].params
[1] = b
;
6632 SCTP_BASE_SYSCTL(sctp_log
).entry
[saveindex
].params
[2] = c
;
6633 SCTP_BASE_SYSCTL(sctp_log
).entry
[saveindex
].params
[3] = d
;
6634 SCTP_BASE_SYSCTL(sctp_log
).entry
[saveindex
].params
[4] = e
;
6635 SCTP_BASE_SYSCTL(sctp_log
).entry
[saveindex
].params
[5] = f
;
6639 /* We will need to add support
6640 * to bind the ports and such here
6641 * so we can do UDP tunneling. In
6642 * the mean-time, we return error
6646 sctp_over_udp_stop(void)
6651 sctp_over_udp_start(void)