/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

static inline void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
        skb_set_owner_w(skb, sk);
        WARN_ON(sk->sk_send_head);
        sk->sk_send_head = skb;
}
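
/*
 * An entailed skb sits at sk_send_head so that the ICSK retransmit
 * machinery can resend it until it is answered; dccp_connect() and
 * dccp_send_close() queue their REQUEST and CLOSE/CLOSEREQ packets this
 * way and transmit a clone of the queued skb.
 */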

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                const struct inet_sock *inet = inet_sk(sk);
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const int dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                             dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;

                dccp_inc_seqno(&dp->dccps_gss);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        /* fall through */
                case DCCP_PKT_DATAACK:
                        break;
                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_seq;
                        /* fall through */
                default:
                        /*
                         * Only data packets should come through with skb->sk
                         * set.
                         */
                        WARN_ON(skb->sk);
                        skb_set_owner_w(skb, sk);
                        break;
                }

                dcb->dccpd_seq = dp->dccps_gss;
                dccp_insert_options(sk, skb);

                skb->h.raw = skb_push(skb, dccp_header_size);
                dh = dccp_hdr(skb);

                /* Build DCCP header and checksum it. */
                memset(dh, 0, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->sport;
                dh->dccph_dport = inet->dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
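
                /*
                 * dccph_doff is the data offset in 32-bit words: generic
                 * header, plus any fixed per-type header, plus options.
                 * Illustrative example (assuming the usual struct sizes of
                 * 12 + 4 bytes for the generic header with the 48-bit
                 * sequence-number extension, and 8 bytes of ack bits): an
                 * option-less DCCP-Ack gives doff = (12 + 4 + 8 + 0) / 4 =
                 * 6 words.
                 */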
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x     = 1;

                dp->dccps_awh = dp->dccps_gss;
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dp->dccps_service;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                icsk->icsk_af_ops->send_check(sk, skb->len, skb);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
                err = icsk->icsk_af_ops->queue_xmit(skb, 0);
                if (err <= 0)
                        return err;

                /* NET_XMIT_CN is special. It does not guarantee that this
                 * packet is lost. It tells us that the device is about to
                 * start dropping packets, or already drops some packets of
                 * the same priority, and invokes us to send less
                 * aggressively.
                 */
                return err == NET_XMIT_CN ? 0 : err;
        }
        return -ENOBUFS;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct dccp_sock *dp = dccp_sk(sk);
        int mss_now = (pmtu - inet_csk(sk)->icsk_af_ops->net_header_len -
                       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

        /* Now subtract optional transport overhead */
        mss_now -= dp->dccps_ext_header_len;

        /*
         * FIXME: this should come from the CCID infrastructure, where, say,
         * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc. For now let's
         * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
         * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding
         * to make it a multiple of 4.
         */
        mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;
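
        /*
         * Illustrative arithmetic: the option estimate above is
         * 5 + 6 + 10 + 6 + 6 + 6 = 39 bytes; adding 3 before the integer
         * divide/multiply by 4 rounds that up to the next multiple of 4,
         * i.e. ((39 + 3) / 4) * 4 = 40 bytes reserved for options.
         */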

        /* And store cached results */
        dp->dccps_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = mss_now;

        return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
                sk_wake_async(sk, 2, POLL_OUT);

        read_unlock(&sk->sk_callback_lock);
}
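
/*
 * Note on the CCID tx hook: ccid_hc_tx_send_packet() returns 0 when the
 * packet may be sent immediately, a negative value on error, and (in this
 * version of the code) a positive value meaning "ask again after this many
 * milliseconds" -- see the msecs_to_jiffies() conversion below.
 */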

/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk: socket to wait for
 * @skb: packet we are trying to send
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
                              long *timeo)
{
        struct dccp_sock *dp = dccp_sk(sk);
        DEFINE_WAIT(wait);
        long delay;
        int rc;

        while (1) {
                prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

                if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                        goto do_error;
                if (!*timeo)
                        goto do_nonblock;
                if (signal_pending(current))
                        goto do_interrupted;

                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
                                            skb->len);
                if (rc <= 0)
                        break;
                delay = msecs_to_jiffies(rc);
                if (delay > *timeo || delay < 0)
                        goto do_nonblock;

                sk->sk_write_pending++;
                release_sock(sk);
                *timeo -= schedule_timeout(delay);
                lock_sock(sk);
                sk->sk_write_pending--;
        }
out:
        finish_wait(sk->sk_sleep, &wait);
        return rc;

do_error:
        rc = -EPIPE;
        goto out;
do_nonblock:
        rc = -EAGAIN;
        goto out;
do_interrupted:
        rc = sock_intr_errno(*timeo);
        goto out;
}

int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
{
        const struct dccp_sock *dp = dccp_sk(sk);
        int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
                                         skb->len);

        if (err > 0)
                err = dccp_wait_for_ccid(sk, skb, timeo);

        if (err == 0) {
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                const int len = skb->len;

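                /*
                 * Choose the packet type: while in PARTOPEN every data
                 * packet also carries an Acknowledgement Number so the
                 * handshake can complete (RFC 4340, 8.1.5); afterwards an
                 * ack is piggybacked only when one is pending.
                 */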
                if (sk->sk_state == DCCP_PARTOPEN) {
                        /* See 8.1.5. Handshake Completion */
                        inet_csk_schedule_ack(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  inet_csk(sk)->icsk_rto,
                                                  DCCP_RTO_MAX);
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                } else if (dccp_ack_pending(sk))
                        dcb->dccpd_type = DCCP_PKT_DATAACK;
                else
                        dcb->dccpd_type = DCCP_PKT_DATA;

                err = dccp_transmit_skb(sk, skb);
                ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
        }

        return err;
}
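
/*
 * A queued skb may still be cloned (e.g. shared with a packet scheduler),
 * so skb_cloned() is checked: a cloned skb gets a private copy via
 * pskb_copy() before retransmission, while an uncloned one only needs the
 * cheaper skb_clone().
 */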
int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
                return -EHOSTUNREACH; /* Routing failure or similar. */

        return dccp_transmit_skb(sk, (skb_cloned(skb) ?
                                      pskb_copy(skb, GFP_ATOMIC) :
                                      skb_clone(skb, GFP_ATOMIC)));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        struct dccp_request_sock *dreq;
        const int dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
                                               dccp_header_size, 1,
                                           GFP_ATOMIC);

        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

        skb->dst = dst_clone(dst);
        skb->csum = 0;

        dreq = dccp_rsk(req);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;
        dccp_insert_options(sk, skb);

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_rsk(req)->rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dreq->dreq_iss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
        dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

        dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
                                              inet_rsk(req)->rmt_addr);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
                                const enum dccp_reset_codes code)
{
        struct dccp_hdr *dh;
        struct dccp_sock *dp = dccp_sk(sk);
        const int dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
                                               dccp_header_size, 1,
                                           GFP_ATOMIC);

        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

        skb->dst = dst_clone(dst);
        skb->csum = 0;

        dccp_inc_seqno(&dp->dccps_gss);

        DCCP_SKB_CB(skb)->dccpd_reset_code = code;
        DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_seq        = dp->dccps_gss;
        dccp_insert_options(sk, skb);

        skb->h.raw = skb_push(skb, dccp_header_size);

        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_header_size);

        dh->dccph_sport = inet_sk(sk)->sport;
        dh->dccph_dport = inet_sk(sk)->dport;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dp->dccps_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

        dccp_hdr_reset(skb)->dccph_reset_code = code;

        dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
                                              inet_sk(sk)->daddr);

        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        dccp_update_gss(sk, dp->dccps_iss);
        /*
         * SWL and AWL are initially adjusted so that they are not less than
         * the initial Sequence Numbers received and sent, respectively:
         *      SWL := max(GSR + 1 - floor(W/4), ISR),
         *      AWL := max(GSS - W' + 1, ISS).
         * These adjustments MUST be applied only at the beginning of the
         * connection.
         */
        dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
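
        /*
         * Illustrative note: max48() picks the later of two 48-bit
         * (circular) sequence numbers, so the line above simply raises AWL
         * to ISS whenever GSS - W' + 1 would fall below the initial
         * sequence number, as the quoted RFC text requires.
         */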

        icsk->icsk_retransmits = 0;
}

int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct inet_connection_sock *icsk = inet_csk(sk);

        dccp_connect_init(sk);

        skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_DCCP_HEADER);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
        skb->csum = 0;

        dccp_skb_entail(sk, skb);
        dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the REQUEST until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);
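
                /*
                 * If the allocation fails, rearm the delayed-ACK timer so
                 * that the ACK is retried from timer context later.
                 */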
                if (skb == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers */
                skb_reserve(skb, MAX_DCCP_HEADER);
                skb->csum = 0;
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /*
         * FIXME: tune this timer. Elapsed time fixes the skew, so no problem
         * with using 2s, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescent senders.
         */
        unsigned long timeout = jiffies + 2 * HZ;

        /* Use new timeout only if there wasn't an older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If the delack timer was blocked or is about to expire,
                 * send the ACK now.
                 *
                 * FIXME: check the "about to expire" part
                 */
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
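
/*
 * Note: the sequence number passed to dccp_send_sync() is the one being
 * acknowledged (e.g. that of a sequence-invalid packet, cf. RFC 4340, 7.5):
 * it is stored in dccpd_seq and turned into the Acknowledgement Number by
 * dccp_transmit_skb() for SYNC/SYNCACK packets.
 */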
void dccp_send_sync(struct sock *sk, const u64 seq,
                    const enum dccp_pkt_type pkt_type)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */
        struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

        if (skb == NULL)
                /* FIXME: how to make sure the sync is sent? */
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, MAX_DCCP_HEADER);
        skb->csum = 0;
        DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_seq = seq;

        dccp_transmit_skb(sk, skb);
}

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        skb->csum = 0;
        DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
                                        DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

        if (active) {
                dccp_skb_entail(sk, skb);
                dccp_transmit_skb(sk, skb_clone(skb, prio));
        } else
                dccp_transmit_skb(sk, skb);
}