/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	/* sctp_packet_transmit() relies on this to reset size to the
	 * current overhead after sending packets.
	 */
	packet->size = packet->overhead;

	packet->has_cookie_echo = 0;
}
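
/* Summary of how the helpers in this file fit together (a reading aid, not
 * a spec): sctp_packet_init() sets up the per-transport packet once,
 * sctp_packet_config() refreshes the vtag and overhead at the start of each
 * flush, sctp_packet_append_chunk()/sctp_packet_transmit_chunk() bundle
 * chunks, and sctp_packet_transmit() hands the result to the IP layer and
 * finishes by calling sctp_packet_reset().
 */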
/* Config a packet.
 * This is a followup set of initializations, run once at the start of
 * each flush before any chunks are appended.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_sock *sp = NULL;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu, then calculate overhead */
	packet->max_size = tp->pathmtu;

	if (asoc) {
		sk = asoc->base.sk;
		sp = sctp_sk(sk);
	}
	packet->overhead = sctp_mtu_payload(sp, 0, 0);
	packet->size = packet->overhead;

	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sp);
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	} else if (!sctp_transport_pmtu_check(tp)) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	}

	if (asoc->pmtu_pending) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
		asoc->pmtu_pending = 0;
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
}
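
/* Illustrative example (assumed values, not taken from this file): for a
 * plain IPv4 transport with a 1500-byte path MTU and no extra options,
 * sctp_mtu_payload(sp, 0, 0) yields an overhead of roughly
 * 20 (IPv4 header) + 12 (SCTP common header) = 32 bytes, so packet->size
 * starts at 32 and chunks can be bundled until packet->max_size is reached
 * (the path MTU, or gso_max_size when GSO can be used).
 */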
/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	/* The overhead will be calculated by sctp_packet_config() */
	packet->overhead = 0;
	sctp_packet_reset(packet);
}
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}
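
/* For callers of sctp_packet_transmit_chunk(), roughly: SCTP_XMIT_OK means
 * the chunk was bundled (possibly after the full packet in front of it was
 * flushed), SCTP_XMIT_PMTU_FULL means it still did not fit (e.g. one_packet
 * was set or a COOKIE_ECHO is pending), and SCTP_XMIT_RWND_FULL/
 * SCTP_XMIT_DELAY are passed back unchanged from the data admission checks
 * further below.
 */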
/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc || !asoc->peer.auth_capable)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
	if (!auth)
		return retval;

	auth->shkey = chunk->shkey;
	sctp_auth_shkey_hold(auth->shkey);

	retval = __sctp_packet_append_chunk(pkt, auth);
	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}
/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}
/* Append a chunk to the offered packet reporting back any inability to do
 * so due to chunk size.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;

	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;

finish:
	return retval;
}
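
/* Worked example of the length accounting above (illustrative numbers):
 * a chunk whose chunk_hdr->length is 45 bytes occupies SCTP_PAD4(45) = 48
 * bytes on the wire, so packet->size grows by 48 even though only 45 bytes
 * of chunk data are present; the padding bytes themselves are written out
 * later, in sctp_packet_pack().
 */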
/* Append a chunk to the offered packet reporting back any inability to do
 * so due to chunk size.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special.  Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}
static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
{
	if (SCTP_OUTPUT_CB(head)->last == head)
		skb_shinfo(head)->frag_list = skb;
	else
		SCTP_OUTPUT_CB(head)->last->next = skb;
	SCTP_OUTPUT_CB(head)->last = skb;

	head->truesize += skb->truesize;
	head->data_len += skb->len;
	head->len += skb->len;
	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);

	__skb_header_release(skb);
}
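
/* Each nskb appended here becomes one segment of the GSO packet: the first
 * one is linked via skb_shinfo(head)->frag_list and later ones are chained
 * through SCTP_OUTPUT_CB(head)->last->next, while head's len/data_len and
 * the owning socket's write-memory accounting are grown to cover the new
 * segment.
 */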
static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		SCTP_OUTPUT_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}

		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
						 packet->auth->shkey, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso)
			sctp_packet_gso_append(head, nskb);

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		if (skb_dst(head) != tp->dst) {
			dst_hold(tp->dst);
			sk_setup_caps(sk, tp->dst);
		}
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}
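
/* Checksum policy used above, in short: GSO packets and devices with
 * NETIF_F_SCTP_CRC get CHECKSUM_PARTIAL so the CRC32c is filled in later
 * (or per segment), while xfrm, ipfragok and non-offloading devices fall
 * back to computing sctp_compute_cksum() in software right here.
 */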
/* All packets are sent to the network through this function.
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct dst_entry *dst;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	skb_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	dst = dst_clone(tp->dst);
	if (!dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}
	skb_dst_set(head, dst);

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	asoc->stats.opackets += pkt_count;
	if (asoc->peer.last_sent_to != tp)
		asoc->peer.last_sent_to = tp;

	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/
/* This private function checks to see if a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * When a Fast Retransmit is being performed the sender SHOULD
	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */
	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}
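
/* Decision order above, summarized: new DATA is refused with RWND_FULL when
 * it exceeds the peer's rwnd while other data is already in flight, or when
 * cwnd is exhausted (unless this is a fast retransmit); otherwise Nagle-style
 * logic either sends right away (nodelay, nothing in flight, appending to a
 * non-empty packet, enough queued data to fill a packet, or a non-delayable
 * fragmented message) or returns SCTP_XMIT_DELAY to wait for a fuller bundle.
 */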
/* This private function does management things when adding a DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	asoc->stream.si->assign_number(chunk);
}
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	/* Don't bundle in this packet if this chunk's auth key doesn't
	 * match other chunks already enqueued on this packet. Also,
	 * don't bundle the chunk with auth key if other chunks in this
	 * packet don't have auth key.
	 */
	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
	    (!packet->auth && chunk->shkey &&
	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
		return SCTP_XMIT_PMTU_FULL;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 *	1. The packet is empty (meaning this chunk is greater
		 *	   than the MTU)
		 *	2. The packet doesn't have any data in it yet and data
		 *	   requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are
		 * adding is a control chunk, but only if current packet
		 * is not a GSO one otherwise it causes fragmentation of
		 * a large frame. So in this case we allow the
		 * fragmentation by forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;
	} else {
		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;
		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}
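
/* Illustrative example for the oversized-chunk branch above (assumed
 * numbers): with pmtu = 1500 and packet->overhead = 32, a bundled AUTH
 * chunk of 44 padded bytes leaves maxsize = 1500 - 32 - 44 = 1424, so a
 * chunk longer than 1424 bytes gets SCTP_XMIT_PMTU_FULL and must wait for
 * a packet of its own (or rely on IP fragmentation when the packet is
 * empty).
 */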