net/sctp/output.c
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);
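
/* Each of these helpers reports its verdict through enum sctp_xmit
 * (declared alongside struct sctp_packet in the SCTP headers):
 * SCTP_XMIT_OK, SCTP_XMIT_PMTU_FULL, SCTP_XMIT_RWND_FULL or
 * SCTP_XMIT_DELAY.
 */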
static void sctp_packet_reset(struct sctp_packet *packet)
{
	packet->size = packet->overhead;
	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}
/* Config a packet.
 * This appears to be a followup set of initializations.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu */
	packet->max_size = tp->pathmtu;
	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	sk = asoc->base.sk;
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sctp_sk(sk));
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	} else if (!sctp_transport_pmtu_check(tp)) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
	rcu_read_unlock();
}
/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	struct sctp_association *asoc = transport->asoc;
	size_t overhead;

	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	if (asoc) {
		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
		overhead = sp->pf->af->net_header_len;
	} else {
		overhead = sizeof(struct ipv6hdr);
	}
	overhead += sizeof(struct sctphdr);
	packet->overhead = overhead;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}
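
/* Illustrative sketch, not part of this file: a caller such as the
 * outqueue flush path would pair the two entry points above roughly as
 * follows before appending any chunks (the vtag source shown here is an
 * assumption made for the example):
 *
 *	struct sctp_packet *pkt = &transport->packet;
 *
 *	sctp_packet_init(pkt, transport, sport, dport);
 *	sctp_packet_config(pkt, asoc->peer.i.init_tag, ecn_capable);
 */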
/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
/* This routine tries to append the chunk to the offered packet.  If adding
 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO
 * chunk is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}
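
/* Illustrative sketch, not part of this file: a caller draining a data
 * queue would typically requeue the chunk on any "full"/"delay" verdict
 * and drop it from its own list only on success, roughly:
 *
 *	switch (sctp_packet_transmit_chunk(pkt, chunk, 0, GFP_ATOMIC)) {
 *	case SCTP_XMIT_PMTU_FULL:
 *	case SCTP_XMIT_RWND_FULL:
 *	case SCTP_XMIT_DELAY:
 *		requeue_chunk(q, chunk);   (hypothetical helper)
 *		break;
 *	case SCTP_XMIT_OK:
 *		break;
 *	}
 */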
/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc);
	if (!auth)
		return retval;

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}
/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;
		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}
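
/* Worked example (illustrative): if the SACK timer is pending when a DATA
 * chunk is appended, the wire layout becomes
 *
 *	| IP hdr | SCTP common hdr | SACK | DATA |
 *
 * and the standalone SACK timer is cancelled above, since the bundled
 * SACK has made a separately timed one redundant.
 */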
/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;
	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;
finish:
	return retval;
}
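
/* Worked example (illustrative): SCTP_PAD4() rounds a chunk up to a
 * 4-byte boundary, so a DATA chunk whose header reports a length of 133
 * bytes consumes SCTP_PAD4(133) = 136 bytes of packet->size once it is
 * appended above.
 */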
/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special.  Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);

finish:
	return retval;
}
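
/* Illustrative note: the bundling order enforced above mirrors the wire
 * requirements: an AUTH chunk must precede the chunks it authenticates
 * (RFC 4895), and a bundled SACK is placed ahead of DATA so the receiver
 * updates its view of the association before processing new data.
 */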
static void sctp_packet_release_owner(struct sk_buff *skb)
{
	sk_free(skb->sk);
}

static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sctp_packet_release_owner;

	/*
	 * The data chunks have already been accounted for in sctp_sendmsg(),
	 * therefore only reserve a single byte to keep socket around until
	 * the packet has been transmitted.
	 */
	refcount_inc(&sk->sk_wmem_alloc);
}
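
/* Illustrative note: unlike skb_set_owner_w(), which charges the whole
 * skb->truesize to sk_wmem_alloc, this variant takes a bare reference so
 * that a large GSO head skb does not double-charge sndbuf space that
 * sctp_sendmsg() already accounted per chunk.
 */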
static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		NAPI_GRO_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}
		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso) {
			if (skb_gro_receive(&head, nskb)) {
				kfree_skb(nskb);
				return 0;
			}
			if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
					 sk->sk_gso_max_segs))
				return 0;
		}

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		rcu_read_lock();
		if (skb_dst(head) != tp->dst) {
			dst_hold(tp->dst);
			sk_setup_caps(sk, tp->dst);
		}
		rcu_read_unlock();
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}
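
/* Illustrative note: in the GSO case above, each loop iteration builds one
 * MTU-sized nskb and chains it onto head via skb_gro_receive(); the
 * gso_size = GSO_BY_FRAGS marker then tells the segmentation layer to
 * split the frame at those recorded boundaries instead of at a fixed
 * segment size.
 */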
/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct dst_entry *dst;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	/* check gso */
	if (packet->size > tp->pathmtu && !packet->ipfragok) {
		if (!sk_can_gso(sk)) {
			pr_err_once("Trying to GSO but underlying device doesn't support it.");
			goto out;
		}
		gso = 1;
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	sctp_packet_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	dst = dst_clone(tp->dst);
	if (!dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}
	skb_dst_set(head, dst);

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}
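
/* Illustrative note: the out: path above runs on both success and failure.
 * Control chunks still on the list are freed, DATA chunks are left alive
 * for the retransmission machinery to manage, and the packet is reset for
 * reuse by the next flush.
 */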
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/
/* This private function checks to see if a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 * When a Fast Retransmit is being performed the sender SHOULD
	 * ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}
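
/* Worked example (illustrative): with rwnd = 1000 bytes, 1200 bytes
 * already in flight and a 1452-byte DATA chunk, datasize > rwnd while
 * inflight > 0, so the verdict is SCTP_XMIT_RWND_FULL; with nothing in
 * flight the same chunk would pass, acting as the single rwnd-probing
 * chunk that rule 6.1 A) permits.
 */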
/* This private function does management things when adding a DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	asoc->stream.si->assign_number(chunk);
}
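
/* Worked example (illustrative): appending a 500-byte DATA chunk against
 * a peer rwnd of 800 leaves asoc->peer.rwnd at 300; a 1000-byte chunk
 * against the same rwnd clamps it to 0 rather than letting it underflow.
 */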
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 *	1. The packet is empty (meaning this chunk is greater
		 *	   than the MTU)
		 *	2. The packet doesn't have any data in it yet and data
		 *	   requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are
		 * adding is a control chunk, but only if current packet
		 * is not a GSO one otherwise it causes fragmentation of
		 * a large frame. So in this case we allow the
		 * fragmentation by forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;
		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}
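
/* Worked example (illustrative, IPv4 numbers assumed): with pmtu = 1500
 * and packet->overhead = 32 (20-byte IP header plus 12-byte SCTP common
 * header), a bundled AUTH chunk padded to 28 bytes leaves
 * maxsize = 1500 - 32 - 28 = 1440, so a chunk longer than 1440 bytes on a
 * non-empty packet forces a flush with SCTP_XMIT_PMTU_FULL.
 */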