/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _NETINET_SCTP_VAR_H_
#define _NETINET_SCTP_VAR_H_

#include <netinet/sctp_uio.h>

#if defined(_KERNEL) || defined(__Userspace__)
extern struct protosw sctp_seqpacket_protosw, sctp_stream_protosw;
#define sctp_feature_on(inp, feature) (inp->sctp_features |= feature)
#define sctp_feature_off(inp, feature) (inp->sctp_features &= ~feature)
#define sctp_is_feature_on(inp, feature) ((inp->sctp_features & feature) == feature)
#define sctp_is_feature_off(inp, feature) ((inp->sctp_features & feature) == 0)
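/*
 * Per-association variants: operate on the TCB's feature flags when an stcb
 * is supplied, otherwise fall back to the endpoint (inp) flags.
 */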
#define sctp_stcb_feature_on(inp, stcb, feature) {\
    if (stcb) { \
        stcb->asoc.sctp_features |= feature; \
    } else if (inp) { \
        inp->sctp_features |= feature; \
    } \
}
#define sctp_stcb_feature_off(inp, stcb, feature) {\
    if (stcb) { \
        stcb->asoc.sctp_features &= ~feature; \
    } else if (inp) { \
        inp->sctp_features &= ~feature; \
    } \
}
#define sctp_stcb_is_feature_on(inp, stcb, feature) \
    (((stcb != NULL) && \
      ((stcb->asoc.sctp_features & feature) == feature)) || \
     ((stcb == NULL) && (inp != NULL) && \
      ((inp->sctp_features & feature) == feature)))
#define sctp_stcb_is_feature_off(inp, stcb, feature) \
    (((stcb != NULL) && \
      ((stcb->asoc.sctp_features & feature) == 0)) || \
     ((stcb == NULL) && (inp != NULL) && \
      ((inp->sctp_features & feature) == 0)) || \
     ((stcb == NULL) && (inp == NULL)))
/* managing mobility_feature in inpcb (by micchie) */
#define sctp_mobility_feature_on(inp, feature) (inp->sctp_mobility_features |= feature)
#define sctp_mobility_feature_off(inp, feature) (inp->sctp_mobility_features &= ~feature)
#define sctp_is_mobility_feature_on(inp, feature) (inp->sctp_mobility_features & feature)
#define sctp_is_mobility_feature_off(inp, feature) ((inp->sctp_mobility_features & feature) == 0)
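/*
 * Receive window space helpers: never advertise less than SCTP_MINIMAL_RWND,
 * and clamp the computed space at zero so it can never go negative.
 */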
#define sctp_maxspace(sb) (max((sb)->sb_hiwat, SCTP_MINIMAL_RWND))

#define sctp_sbspace(asoc, sb) ((long) ((sctp_maxspace(sb) > (asoc)->sb_cc) ? (sctp_maxspace(sb) - (asoc)->sb_cc) : 0))

#define sctp_sbspace_failedmsgs(sb) ((long) ((sctp_maxspace(sb) > SCTP_SBAVAIL(sb)) ? (sctp_maxspace(sb) - SCTP_SBAVAIL(sb)) : 0))

#define sctp_sbspace_sub(a, b) (((a) > (b)) ? ((a) - (b)) : 0)
/*
 * I tried to cache the readq entries at one point. But the reality
 * is that it did not add any performance since this meant we had to
 * lock the STCB on read. And at that point once you have to do an
 * extra lock, it really does not matter if the lock is in the ZONE
 * stuff or in our code. Note that this same problem would occur with
 * an mbuf cache as well so it is not really worth doing, at least
 * right now.
 */
#ifdef INVARIANTS
#define sctp_free_a_readq(_stcb, _readq) { \
    if ((_readq)->on_strm_q) \
        panic("On strm q stcb:%p readq:%p", (_stcb), (_readq)); \
    SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
    SCTP_DECR_READQ_COUNT(); \
}
#else
#define sctp_free_a_readq(_stcb, _readq) { \
    SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
    SCTP_DECR_READQ_COUNT(); \
}
#endif
#define sctp_alloc_a_readq(_stcb, _readq) { \
    (_readq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_readq), struct sctp_queued_to_read); \
    if ((_readq)) { \
        SCTP_INCR_READQ_COUNT(); \
    } \
}
#define sctp_free_a_strmoq(_stcb, _strmoq, _so_locked) { \
    if ((_strmoq)->holds_key_ref) { \
        sctp_auth_key_release((_stcb), (_strmoq)->auth_keyid, _so_locked); \
        (_strmoq)->holds_key_ref = 0; \
    } \
    SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), (_strmoq)); \
    SCTP_DECR_STRMOQ_COUNT(); \
}
#define sctp_alloc_a_strmoq(_stcb, _strmoq) { \
    (_strmoq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_strmoq), struct sctp_stream_queue_pending); \
    if ((_strmoq)) { \
        memset(_strmoq, 0, sizeof(struct sctp_stream_queue_pending)); \
        SCTP_INCR_STRMOQ_COUNT(); \
        (_strmoq)->holds_key_ref = 0; \
    } \
}
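/*
 * Chunk allocation keeps a small per-association free list in front of the
 * zone allocator; freed chunks are cached there until the per-association
 * or system-wide resource limits are exceeded.
 */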
#define sctp_free_a_chunk(_stcb, _chk, _so_locked) { \
    if ((_chk)->holds_key_ref) {\
        sctp_auth_key_release((_stcb), (_chk)->auth_keyid, _so_locked); \
        (_chk)->holds_key_ref = 0; \
    } \
    if (_stcb) { \
        SCTP_TCB_LOCK_ASSERT((_stcb)); \
        if ((_chk)->whoTo) { \
            sctp_free_remote_addr((_chk)->whoTo); \
            (_chk)->whoTo = NULL; \
        } \
        if (((_stcb)->asoc.free_chunk_cnt > SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit)) || \
            (SCTP_BASE_INFO(ipi_free_chunks) > SCTP_BASE_SYSCTL(sctp_system_free_resc_limit))) { \
            SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
            SCTP_DECR_CHK_COUNT(); \
        } else { \
            TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
            (_stcb)->asoc.free_chunk_cnt++; \
            atomic_add_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
        } \
    } else { \
        SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
        SCTP_DECR_CHK_COUNT(); \
    } \
}
#define sctp_alloc_a_chunk(_stcb, _chk) { \
    if (TAILQ_EMPTY(&(_stcb)->asoc.free_chunks)) { \
        (_chk) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_chunk), struct sctp_tmit_chunk); \
        if ((_chk)) { \
            SCTP_INCR_CHK_COUNT(); \
            (_chk)->whoTo = NULL; \
            (_chk)->holds_key_ref = 0; \
        } \
    } else { \
        (_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
        TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
        atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
        (_chk)->holds_key_ref = 0; \
        SCTP_STAT_INCR(sctps_cached_chk); \
        (_stcb)->asoc.free_chunk_cnt--; \
    } \
}
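/* Drop one reference on a remote address; free it once the refcount reaches zero. */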
#define sctp_free_remote_addr(__net) { \
    if ((__net)) { \
        if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \
            RO_NHFREE(&(__net)->ro); \
            if ((__net)->src_addr_selected) { \
                sctp_free_ifa((__net)->ro._s_addr); \
                (__net)->ro._s_addr = NULL; \
            } \
            (__net)->src_addr_selected = 0; \
            (__net)->dest_state &= ~SCTP_ADDR_REACHABLE; \
            SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_net), (__net)); \
            SCTP_DECR_RADDR_COUNT(); \
        } \
    } \
}
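/*
 * Socket buffer accounting: besides the socket buffer itself, charge or
 * credit the association's sb_cc and my_rwnd_control_len when the data is
 * tied to an stcb.
 */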
#define sctp_sbfree(ctl, stcb, sb, m) { \
    SCTP_SB_DECR(sb, SCTP_BUF_LEN((m))); \
    SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
    if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
        SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
        SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
    } \
    if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
        SCTP_BUF_TYPE(m) != MT_OOBDATA) \
        atomic_subtract_int(&(sb)->sb_ctl, SCTP_BUF_LEN((m))); \
}
#define sctp_sballoc(stcb, sb, m) { \
    SCTP_SB_INCR(sb, SCTP_BUF_LEN((m))); \
    atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
    if (stcb) { \
        atomic_add_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
        atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
    } \
    if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
        SCTP_BUF_TYPE(m) != MT_OOBDATA) \
        atomic_add_int(&(sb)->sb_ctl, SCTP_BUF_LEN((m))); \
}
#define sctp_ucount_incr(val) { \
    val++; \
}

#define sctp_ucount_decr(val) { \
    if (val > 0) { \
        val--; \
    } else { \
        val = 0; \
    } \
}

#define sctp_mbuf_crush(data) do { \
    struct mbuf *_m; \
    _m = (data); \
    while (_m && (SCTP_BUF_LEN(_m) == 0)) { \
        (data) = SCTP_BUF_NEXT(_m); \
        SCTP_BUF_NEXT(_m) = NULL; \
        sctp_m_free(_m); \
        _m = (data); \
    } \
} while (0)
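/* Flight size accounting, per destination (net) and per association. */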
#define sctp_flight_size_decrease(tp1) do { \
    if (tp1->whoTo->flight_size >= tp1->book_size) \
        tp1->whoTo->flight_size -= tp1->book_size; \
    else \
        tp1->whoTo->flight_size = 0; \
} while (0)

#define sctp_flight_size_increase(tp1) do { \
    (tp1)->whoTo->flight_size += (tp1)->book_size; \
} while (0)
#ifdef SCTP_FS_SPEC_LOG
#define sctp_total_flight_decrease(stcb, tp1) do { \
    if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
        stcb->asoc.fs_index = 0; \
    stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
    stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
    stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
    stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
    stcb->asoc.fslog[stcb->asoc.fs_index].incr = 0; \
    stcb->asoc.fslog[stcb->asoc.fs_index].decr = 1; \
    stcb->asoc.fs_index++; \
    tp1->window_probe = 0; \
    if (stcb->asoc.total_flight >= tp1->book_size) { \
        stcb->asoc.total_flight -= tp1->book_size; \
        if (stcb->asoc.total_flight_count > 0) \
            stcb->asoc.total_flight_count--; \
    } else { \
        stcb->asoc.total_flight = 0; \
        stcb->asoc.total_flight_count = 0; \
    } \
} while (0)
#define sctp_total_flight_increase(stcb, tp1) do { \
    if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
        stcb->asoc.fs_index = 0; \
    stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
    stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
    stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
    stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
    stcb->asoc.fslog[stcb->asoc.fs_index].incr = 1; \
    stcb->asoc.fslog[stcb->asoc.fs_index].decr = 0; \
    stcb->asoc.fs_index++; \
    (stcb)->asoc.total_flight_count++; \
    (stcb)->asoc.total_flight += (tp1)->book_size; \
} while (0)
#else
#define sctp_total_flight_decrease(stcb, tp1) do { \
    tp1->window_probe = 0; \
    if (stcb->asoc.total_flight >= tp1->book_size) { \
        stcb->asoc.total_flight -= tp1->book_size; \
        if (stcb->asoc.total_flight_count > 0) \
            stcb->asoc.total_flight_count--; \
    } else { \
        stcb->asoc.total_flight = 0; \
        stcb->asoc.total_flight_count = 0; \
    } \
} while (0)

#define sctp_total_flight_increase(stcb, tp1) do { \
    (stcb)->asoc.total_flight_count++; \
    (stcb)->asoc.total_flight += (tp1)->book_size; \
} while (0)
#endif
#define SCTP_PF_ENABLED(_net) (_net->pf_threshold < _net->failure_threshold)
#define SCTP_NET_IS_PF(_net) (_net->pf_threshold < _net->error_count)
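/* Entry points exported to the socket layer and the IP input path. */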
struct sctp_inpcb;
struct sctp_nets;
struct sctp_tcb;

void sctp_close(struct socket *so);
void sctp_abort(struct socket *so);
int sctp_disconnect(struct socket *so);
ipproto_ctlinput_t sctp_ctlinput;
int sctp_ctloutput(struct socket *, struct sockopt *);
void sctp_input_with_port(struct mbuf *, int, uint16_t);
int sctp_input(struct mbuf **, int *, int);
void sctp_pathmtu_adjustment(struct sctp_tcb *, uint32_t, bool);
void sctp_notify(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *,
    uint8_t, uint8_t, uint16_t, uint32_t);
int sctp_flush(struct socket *, int);
int sctp_shutdown(struct socket *, enum shutdown_how);
int sctp_bindx(struct socket *, int, struct sockaddr_storage *,
    int, int, struct proc *);
/* can't use sctp_assoc_t here */
int sctp_peeloff(struct socket *, struct socket *, int, caddr_t, int *);
int sctp_ingetaddr(struct socket *, struct sockaddr *);
int sctp_peeraddr(struct socket *, struct sockaddr *);
int sctp_listen(struct socket *, int, struct thread *);
int sctp_accept(struct socket *, struct sockaddr *);
#endif /* _KERNEL || __Userspace__ */

#endif /* !_NETINET_SCTP_VAR_H_ */