/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
	return skb_clone(sk->sk_send_head, gfp_any());
}
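
/*
 * Note: dccp_connect() and dccp_send_close() below rely on this pattern -
 * the original skb stays on sk_send_head so that dccp_retransmit_skb() can
 * clone it again later, while the clone returned here is consumed by the
 * first transmission attempt.
 */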

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
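		/*
		 * Illustrative sizes: struct dccp_hdr is 12 bytes, the 48-bit
		 * sequence number extension (struct dccp_hdr_ext) adds 4, and
		 * dccp_packet_hdr_len() contributes the type-specific part,
		 * e.g. 0 for Data and 8 for Ack/DataAck.
		 */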
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->inet_sport;
		dh->dccph_dport	= inet->inet_dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
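		/*
		 * Example (illustrative): doff counts 32-bit words, so a
		 * DCCP-Data packet (16-byte generic header, no type-specific
		 * part) carrying 8 bytes of options has doff = 24 / 4 = 6.
		 */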
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov	= dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 option overhead */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets,
	 * as per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (e.g. Ack Vector which can take up to 255 bytes), it is
	 * better to schedule a separate Ack. Thus we leave headroom for the
	 * following:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 */
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);
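	/*
	 * Worked example (illustrative, assuming a plain IPv4 path with
	 * pmtu = 1500, no CCMPS limit, no extension headers, NDP count and
	 * Ack Vectors disabled): 1500 - (20 + 12 + 4) = 1464 bytes remain
	 * after the fixed headers, minus roundup(1 + 6 + 10 + 6, 4) = 24
	 * bytes of option headroom, caching an MPS of 1440 bytes.
	 */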

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
	release_sock(sk);

	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk_sleep(sk), &wait);

	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}
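
/*
 * Note: a negative return means the wait was aborted by a signal or socket
 * error; otherwise the jiffies remaining from @delay are returned, so a
 * caller can charge the time actually slept as (delay - rc), which is how
 * dccp_flush_write_queue() below accounts against its time budget.
 */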

/**
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
	int err, len;
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb = dccp_qpolicy_pop(sk);

	if (unlikely(skb == NULL))
		return;
	len = skb->len;

	if (sk->sk_state == DCCP_PARTOPEN) {
		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
		/*
		 * See 8.1.5 - Handshake Completion.
		 *
		 * For robustness we resend Confirm options until the client has
		 * entered OPEN. During the initial feature negotiation, the MPS
		 * is smaller than usual, reduced by the Change/Confirm options.
		 */
		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
			dccp_send_ack(sk);
			dccp_feat_list_purge(&dp->dccps_featneg);
		}

		inet_csk_schedule_ack(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else if (dccp_ack_pending(sk)) {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
	}

	err = dccp_transmit_skb(sk, skb);
	if (err)
		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
	/*
	 * Register this one as sent even if an error occurred. To the remote
	 * end a local packet drop is indistinguishable from network loss, i.e.
	 * any local drop will eventually be reported via receiver feedback.
	 */
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

	/*
	 * If the CCID needs to transfer additional header options out-of-band
	 * (e.g. Ack Vectors or feature-negotiation options), it activates this
	 * flag to schedule a Sync. The Sync will automatically incorporate all
	 * currently pending header options, thus clearing the backlog.
	 */
	if (dp->dccps_sync_scheduled)
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}

void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}
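
/*
 * Note: dccp_write_xmit() is the non-blocking counterpart of
 * dccp_flush_write_queue() above - on CCID_PACKET_DELAY it arms
 * dccps_xmit_timer and returns instead of sleeping in process context.
 */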

/**
 * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close    in   node-CLOSING  state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;

	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase GSS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_gss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_gss;

	/* Resolve feature dependencies resulting from choice of CCID */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= htons(inet_rsk(req)->ir_num);
	dh->dccph_dport	= inet_rsk(req)->ir_rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @sk */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:	/* fall through */
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0.
	 */
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);
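
/*
 * Note: this helper only builds the Reset skb; the address-family code
 * (e.g. dccp_v4_ctl_send_reset()) is responsible for actually transmitting
 * it via the control socket.
 */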

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	icsk->icsk_retransmits = 0;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type	= pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq	= ackno;

	/*
	 * Clear the flag in case the Sync was scheduled for out-of-band data,
	 * such as carrying a long Ack Vector.
	 */
	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		skb = dccp_skb_entail(sk, skb);
		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
		 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
		 * state can be left. The initial timeout is 2 RTTs.
		 * Since RTT measurement is done by the CCIDs, there is no easy
		 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
		 * is too low (200ms); we use a high value to avoid unnecessary
		 * retransmissions when the link RTT is > 0.2 seconds.
		 * FIXME: Let main module sample RTTs and use that instead.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	}
	dccp_transmit_skb(sk, skb);
}