/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>		/* needed for IP_INC_STATS_BH() below */
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
/* Forward declarations for private helpers. */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
					      struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len);
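
/* The sctp_xmit_t results used throughout this file (summary inferred from
 * how the values are used below; the enum itself is defined in the SCTP
 * headers, not here):
 *
 *	SCTP_XMIT_OK          - the chunk was appended to the packet
 *	SCTP_XMIT_PMTU_FULL   - the chunk does not fit into the current packet
 *	SCTP_XMIT_RWND_FULL   - the peer's rwnd (or our cwnd) does not allow
 *				more data right now
 *	SCTP_XMIT_NAGLE_DELAY - a small DATA chunk is held back in the hope of
 *				bundling a fuller packet
 */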
static void sctp_packet_reset(struct sctp_packet *packet)
{
	packet->size = packet->overhead;
	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
}

/* Config a packet.
 * This appears to be a followup set of initializations.
 */
struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
				       __u32 vtag, int ecn_capable)
{
	struct sctp_chunk *chunk = NULL;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);

	packet->vtag = vtag;

	if (ecn_capable && sctp_packet_empty(packet)) {
		chunk = sctp_get_ecne_prepend(packet->transport->asoc);

		/* If there is a prepend chunk, stick it on the list before
		 * any other chunks get appended.
		 */
		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	return packet;
}
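
/* Illustrative call sequence (a sketch, not code taken from this file; the
 * real callers live in the output queue flushing code, and the variable
 * names below are made up for the example):
 *
 *	struct sctp_packet *pkt = &transport->packet;
 *
 *	sctp_packet_init(pkt, transport, sport, dport);
 *	sctp_packet_config(pkt, vtag, ecn_capable);
 *	sctp_packet_transmit_chunk(pkt, chunk, one_packet);
 *	sctp_packet_transmit(pkt);
 */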

/* Initialize the packet structure. */
struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
				     struct sctp_transport *transport,
				     __u16 sport, __u16 dport)
{
	struct sctp_association *asoc = transport->asoc;
	size_t overhead;

	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	if (asoc) {
		struct sctp_sock *sp = sctp_sk(asoc->base.sk);

		overhead = sp->pf->af->net_header_len;
	} else {
		/* No association yet, so assume the larger IPv6 header. */
		overhead = sizeof(struct ipv6hdr);
	}
	overhead += sizeof(struct sctphdr);
	packet->overhead = overhead;
	sctp_packet_reset(packet);
	packet->vtag = 0;

	return packet;
}
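
/* Example of the resulting overhead (a sketch; actual values depend on the
 * address family and any IP options/extension headers in use):
 *
 *	IPv4, no options:       20 (iphdr)   + 12 (sctphdr) = 32 bytes
 *	IPv6, no ext. headers:  40 (ipv6hdr) + 12 (sctphdr) = 52 bytes
 *
 * so a 1500 byte path MTU leaves 1468 bytes of chunk space in the IPv4 case.
 */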

void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
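
/* Note: only the queued chunks are released above; the sctp_packet structure
 * itself is not freed here, as it is normally embedded in a longer-lived
 * object (the transport) and is simply reset and reused.
 */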

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
				       struct sctp_chunk *chunk,
				       int one_packet)
{
	sctp_xmit_t retval;
	int error = 0;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			error = sctp_packet_transmit(packet);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_NAGLE_DELAY:
		break;
	}

	return retval;
}
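
/* Note on the SCTP_XMIT_PMTU_FULL path above: the already-built packet is
 * flushed with sctp_packet_transmit() and, unless the caller asked for a
 * single packet (one_packet), the chunk is immediately retried against the
 * now-empty packet.  A single chunk that is larger than the PMTU by itself
 * is then let through by sctp_packet_will_fit(), which allows IP-level
 * fragmentation for that case.
 */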

/* Try to bundle an auth chunk into the packet. */
static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	struct sctp_chunk *auth;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc);
	if (!auth)
		return retval;

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}
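
/* Note: the AUTH chunk built above belongs to the packet only once the append
 * succeeds; on any other result it is freed right away so that a failed
 * bundling attempt does not leak the freshly created chunk.
 */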

/* Try to bundle a SACK with the packet. */
static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
					   struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one into the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}
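
/* Note: a SACK is only piggy-backed while the SACK timer is still pending.
 * Bundling it satisfies that pending SACK, so the timer is stopped and the
 * reference it held on the association is dropped via sctp_association_put().
 */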

/* Append a chunk to the offered packet, reporting back any inability to do
 * so.
 */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
					      struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		break;

	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;

finish:
	return retval;
}
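
/* Note: chunk_len above is the chunk's length field rounded up to a multiple
 * of 4 bytes (WORD_ROUND), so packet->size accounts for the padding that will
 * be added on the wire; e.g. a 29 byte chunk consumes 32 bytes of the packet.
 */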

/* Append a chunk to the offered packet, reporting back any inability to do
 * so.
 */
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
				     struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special.  Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}
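
/* Note: bundling order matters above.  The AUTH chunk (when the peer asked
 * for this chunk type to be authenticated) is appended first so that it
 * precedes the chunks it covers, then any pending SACK, and finally the
 * chunk that triggered the call.
 */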

static void sctp_packet_release_owner(struct sk_buff *skb)
{
	sk_free(skb->sk);
}

static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sctp_packet_release_owner;

	/*
	 * The data chunks have already been accounted for in sctp_sendmsg(),
	 * therefore only reserve a single byte to keep socket around until
	 * the packet has been transmitted.
	 */
	atomic_inc(&sk->sk_wmem_alloc);
}
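
/* Note: this is a deliberately lightweight variant of skb_set_owner_w().
 * Charging the skb's truesize again would double-account data that
 * sctp_sendmsg() has already charged to the socket, so only a single byte
 * of sk_wmem_alloc is used to pin the socket until the skb is destructed.
 */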

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is a normal kernel error return value.
 */
int sctp_packet_transmit(struct sctp_packet *packet)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctphdr *sh;
	struct sk_buff *nskb;
	struct sctp_chunk *chunk, *tmp;
	struct sock *sk;
	int err = 0;
	int padding;		/* How much padding do we need?  */
	__u8 has_data = 0;
	struct dst_entry *dst = tp->dst;
	unsigned char *auth = NULL;	/* pointer to auth in skb data */

	pr_debug("%s: packet:%p\n", __func__, packet);

	/* Do NOT generate a chunkless packet. */
	if (list_empty(&packet->chunk_list))
		return err;

	/* Set up convenience variables... */
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* Allocate the new skb.  */
	nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		goto nomem;

	/* Make sure the outbound skb has enough header room reserved. */
	skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);

	/* Set the owning socket so that we know where to get the
	 * destination IP address.
	 */
	sctp_packet_set_owner_w(nskb, sk);

	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sctp_sk(sk));
		if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
			sctp_assoc_sync_pmtu(sk, asoc);
		}
	}
	dst = dst_clone(tp->dst);
	skb_dst_set(nskb, dst);
	if (!dst)
		goto no_route;

	/* Build the SCTP header.  */
	sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
	skb_reset_transport_header(nskb);
	sh->source = htons(packet->source_port);
	sh->dest   = htons(packet->destination_port);

	/* From 6.8 Adler-32 Checksum Calculation:
	 *	After the packet is constructed (containing the SCTP common
	 *	header and one or more control or DATA chunks), the
	 *	transmitter shall:
	 *
	 *	1) Fill in the proper Verification Tag in the SCTP common
	 *	   header and initialize the checksum field to 0's.
	 */
	sh->vtag     = htonl(packet->vtag);
	sh->checksum = 0;

	/* 6.10 Bundling
	 *
	 *	An endpoint bundles chunks by simply including multiple
	 *	chunks in one outbound SCTP packet.  ...
	 */

	/* 3.2  Chunk Field Descriptions
	 *
	 * The total length of a chunk (including Type, Length and
	 * Value fields) MUST be a multiple of 4 bytes.  If the length
	 * of the chunk is not a multiple of 4 bytes, the sender MUST
	 * pad the chunk with all zero bytes and this padding is not
	 * included in the chunk length field.  The sender should
	 * never pad with more than 3 bytes.
	 *
	 * [This whole comment explains WORD_ROUND() below.]
	 */
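
	/* For example (illustrative): a chunk whose skb holds 29 bytes gets
	 * 3 zero bytes of padding below, so 32 bytes are copied into the
	 * packet while the chunk's length field continues to advertise 29.
	 */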

	pr_debug("***sctp_transmit_packet***\n");

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (sctp_chunk_is_data(chunk)) {
			/* 6.3.1 C4) When data is in flight and when allowed
			 * by rule C5, a new RTT measurement MUST be made each
			 * round trip.  Furthermore, new RTT measurements
			 * SHOULD be made no more than once per round-trip
			 * for a given destination transport address.
			 */

			if (!tp->rto_pending) {
				chunk->rtt_in_progress = 1;
				tp->rto_pending = 1;
			}

			has_data = 1;
		}

		padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
		if (padding)
			memset(skb_put(chunk->skb, padding), 0, padding);

		/* if this is the auth chunk that we are adding,
		 * store pointer where it will be added and put
		 * the auth into the packet.
		 */
		if (chunk == packet->auth)
			auth = skb_tail_pointer(nskb);

		memcpy(skb_put(nskb, chunk->skb->len),
		       chunk->skb->data, chunk->skb->len);

		pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, "
			 "rtt_in_progress:%d\n", chunk,
			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
			 chunk->has_tsn ? "TSN" : "No TSN",
			 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
			 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
			 chunk->rtt_in_progress);

		/*
		 * If this is a control chunk, this is our last
		 * reference. Free data chunks after they've been
		 * acknowledged or have failed.
		 */
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}

	/* SCTP-AUTH, Section 6.2
	 *    The sender MUST calculate the MAC as described in RFC2104 [2]
	 *    using the hash function H as described by the MAC Identifier and
	 *    the shared association key K based on the endpoint pair shared key
	 *    described by the shared key identifier.  The 'data' used for the
	 *    computation of the AUTH-chunk is given by the AUTH chunk with its
	 *    HMAC field set to zero (as shown in Figure 6) followed by all
	 *    chunks that are placed after the AUTH chunk in the SCTP packet.
	 */
	if (auth)
		sctp_auth_calculate_hmac(asoc, nskb,
					 (struct sctp_auth_chunk *)auth,
					 GFP_ATOMIC);

	/* 2) Calculate the Adler-32 checksum of the whole packet,
	 *    including the SCTP common header and all the
	 *    chunks.
	 *
	 * Note: Adler-32 is no longer applicable, as has been replaced
	 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
	 */
	if (!sctp_checksum_disable) {
		if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
		    (dst_xfrm(dst) != NULL) || packet->ipfragok) {
			sh->checksum = sctp_compute_cksum(nskb, 0);
		} else {
			/* no need to seed pseudo checksum for SCTP */
			nskb->ip_summed = CHECKSUM_PARTIAL;
			nskb->csum_start = (skb_transport_header(nskb) -
					    nskb->head);
			nskb->csum_offset = offsetof(struct sctphdr, checksum);
		}
	}
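
	/* Note: on the offload path above, CHECKSUM_PARTIAL together with
	 * csum_start/csum_offset tells the lower layers where the SCTP header
	 * begins and where to write the CRC32-C; the software fallback is
	 * used when the device lacks NETIF_F_SCTP_CSUM, an IPsec transform is
	 * attached to the route, or IP fragmentation is allowed.
	 */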

	/* IP layer ECN support
	 *
	 * "The ECN-Capable Transport (ECT) bit would be set by the
	 *  data sender to indicate that the end-points of the
	 *  transport protocol are ECN-capable."
	 *
	 * Now setting the ECT bit all the time, as it should not cause
	 * any problems protocol-wise even if our peer ignores it.
	 *
	 * Note: the IPv6 code checks this bit again later in the
	 * transmit path.  See IP6_ECN_flow_xmit().
	 */
	(*tp->af_specific->ecn_capable)(nskb->sk);

	/* Set up the IP options.  */
	/* BUG: not implemented
	 * For v4 this all lives somewhere in sk->sk_opt...
	 */

	/* Dump that on IP!  */
	if (asoc) {
		asoc->stats.opackets++;
		if (asoc->peer.last_sent_to != tp)
			/* Considering the multiple CPU scenario, this is a
			 * "correcter" place for last_sent_to.  --xguo
			 */
			asoc->peer.last_sent_to = tp;
	}

	if (has_data) {
		struct timer_list *timer;
		unsigned long timeout;

		/* Restart the AUTOCLOSE timer when sending data. */
		if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
			timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
			timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
		}
	}
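
	/* Note: mod_timer() returns 0 when the timer was not already pending,
	 * so the extra association reference is taken only when this call
	 * arms an idle AUTOCLOSE timer; an already-pending timer still holds
	 * the reference it took earlier.
	 */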

	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len);

	nskb->local_df = packet->ipfragok;
	(*tp->af_specific->sctp_xmit)(nskb, tp);

out:
	sctp_packet_reset(packet);
	return err;
no_route:
	kfree_skb(nskb);
	IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);

	/* FIXME: Returning the 'err' will affect all the associations
	 * associated with a socket, although only one of the paths of the
	 * association is unreachable.
	 * The real failure of a transport or association can be passed on
	 * to the user via notifications. So setting this error may not be
	 * required.
	 */
	 /* err = -EHOSTUNREACH; */
err:
	/* Control chunks are unreliable so just drop them.  DATA chunks
	 * will get resent or dropped later.
	 */
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	goto out;
nomem:
	err = -ENOMEM;
	goto err;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks to see if a chunk can be added */
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
					       struct sctp_chunk *chunk)
{
	sctp_xmit_t retval = SCTP_XMIT_OK;
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd) {
		if (inflight > 0) {
			/* We have (at least) one data chunk in flight,
			 * so we can't fall back to rule 6.1 B).
			 */
			retval = SCTP_XMIT_RWND_FULL;
			goto finish;
		}
	}

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 7.2.4 & the Implementers Guide 2.8.
	 *
	 * When a Fast Retransmit is being performed the sender SHOULD
	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX)
		if (flight_size >= transport->cwnd) {
			retval = SCTP_XMIT_RWND_FULL;
			goto finish;
		}

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */
	if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
	    inflight && sctp_state(asoc, ESTABLISHED)) {
		unsigned int max = transport->pathmtu - packet->overhead;
		unsigned int len = chunk->skb->len + q->out_qlen;

		/* Check whether this chunk and all the rest of pending
		 * data will fit or delay in hopes of bundling a full
		 * sized packet.
		 * Don't delay large message writes that may have been
		 * fragmented into small pieces.
		 */
		if ((len < max) && chunk->msg->can_delay) {
			retval = SCTP_XMIT_NAGLE_DELAY;
			goto finish;
		}
	}

finish:
	return retval;
}
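
/* Worked example of the Nagle check above (illustrative numbers): with an
 * IPv4 path MTU of 1500 and 32 bytes of overhead, max is 1468.  A 100 byte
 * chunk with 200 more bytes already queued gives len = 300; since 300 < 1468
 * and the message allows delaying, the chunk is held back with
 * SCTP_XMIT_NAGLE_DELAY in the hope of bundling a fuller packet.
 */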

/* This private function does management things when adding DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	/* Has been accepted for transmission. */
	if (!asoc->peer.prsctp_capable)
		chunk->msg->can_abandon = 0;
	sctp_chunk_assign_tsn(chunk);
	sctp_chunk_assign_ssn(chunk);
}
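
/* Note: once a TSN and SSN have been assigned the chunk is committed to the
 * wire; when the peer does not support PR-SCTP the owning message is marked
 * non-abandonable here, so it will keep being retransmitted until it is
 * acknowledged.
 */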

static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
					struct sctp_chunk *chunk,
					u16 chunk_len)
{
	size_t psize;
	size_t pmtu;
	int too_big;
	sctp_xmit_t retval = SCTP_XMIT_OK;

	psize = packet->size;
	pmtu  = ((packet->transport->asoc) ?
		 (packet->transport->asoc->pathmtu) :
		 (packet->transport->pathmtu));

	too_big = (psize + chunk_len > pmtu);

	/* Decide if we need to fragment or resubmit later. */
	if (too_big) {
		/* It's OK to fragment at IP level if any one of the
		 * following is true:
		 * 1. The packet is empty (meaning this chunk is greater
		 *    than the MTU)
		 * 2. The chunk we are adding is a control chunk
		 * 3. The packet doesn't have any data in it yet and data
		 *    requires authentication.
		 */
		if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
		} else {
			retval = SCTP_XMIT_PMTU_FULL;