/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
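/*
 * dccp_fin - queue the packet like a TCP FIN so that dccp_recvmsg() can
 * return any remaining payload and then signal end-of-stream to the reader.
 */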
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk, 0);
}
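/*
 * dccp_rcv_close - handle an incoming Close: answer with a Reset (code
 * Closed), move the socket to CLOSED and wake any sleeping process.
 */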
static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	dccp_v4_send_reset(sk, DCCP_RESET_CODE_CLOSED);
	dccp_fin(sk, skb);
	dccp_set_state(sk, DCCP_CLOSED);
	sk_wake_async(sk, 1, POLL_HUP);
}
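/*
 * dccp_rcv_closereq - handle an incoming CloseReq: only a client may act on
 * it, so a server answers with a Sync; a client moves to CLOSING and sends a
 * Close of its own.
 */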
static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return;
	}

	if (sk->sk_state != DCCP_CLOSING)
		dccp_set_state(sk, DCCP_CLOSING);
	dccp_send_close(sk, 0);
}
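/*
 * dccp_event_ack_recv - let the Ack Vector code see the peer's
 * Acknowledgement Number so that state for acknowledged packets can be
 * released.
 */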
static inline void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dp->dccps_options.dccpo_send_ack_vector)
		dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
					    DCCP_SKB_CB(skb)->dccpd_ack_seq);
}
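/*
 * dccp_check_seqno - validate sequence and acknowledgement numbers against
 * the current windows (Steps 5 and 6 of the specification's packet
 * processing pseudocode).  Returns 0 if the packet may be processed further,
 * -1 if it must be dropped.
 */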
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl;

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			      dp->dccps_awl, dp->dccps_awh) &&
		    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
			dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
		else
			return -1;	/* Step 5: drop packet and return */
	}
	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 *      Otherwise,
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = dp->dccps_gsr;
		dccp_inc_seqno(&lswl);
		lawl = dp->dccps_gar;
	}
	if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
	    (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
		       lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
		     DCCP_PKT_WITHOUT_ACK_SEQ))
			dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	} else {
		LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, "
			       "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
			       "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
			       "sending SYNC...\n",
			       dccp_packet_name(dh->dccph_type),
			       (unsigned long long) lswl,
			       (unsigned long long)
			       DCCP_SKB_CB(skb)->dccpd_seq,
			       (unsigned long long) dp->dccps_swh,
			       (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
				DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
			       (unsigned long long) lawl,
			       (unsigned long long)
			       DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       (unsigned long long) dp->dccps_awh);
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}
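/*
 * __dccp_rcv_established - per-packet-type processing once the connection is
 * up: Data/DataAck is queued for the reader, Reset tears the connection
 * down, CloseReq/Close are handled by the helpers above, and unexpected
 * Request/Response packets are answered with a Sync (Step 7).
 */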
static inline int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
					 const struct dccp_hdr *dh,
					 const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: check if sk_receive_queue is full, schedule DATA_DROPPED
		 * option if it is.
		 */
		__skb_pull(skb, dh->dccph_doff * 4);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		sk->sk_data_ready(sk, 0);
		return 0;
	case DCCP_PKT_ACK:
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *  Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_fin(sk, skb);
		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		dccp_rcv_closereq(sk, skb);
		goto discard;
	case DCCP_PKT_CLOSE:
		dccp_rcv_close(sk, skb);
		return 0;
	case DCCP_PKT_REQUEST:
		/* Step 7
		 *   or (S.is_server and P.type == Response)
		 *   or (S.is_client and P.type == Request)
		 *   or (S.state >= OPEN and P.type == Request
		 *	and P.seqno >= S.OSR)
		 *   or (S.state >= OPEN and P.type == Response
		 *	and P.seqno >= S.OSR)
		 *   or (S.state == RESPOND and P.type == Data),
		 *  Send Sync packet acknowledging P.seqno
		 *  Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
				       DCCP_PKT_SYNC);
		}
		break;
	case DCCP_PKT_SYNC:
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		/*
		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
		 * MAY have non-zero-length application data areas, whose
		 * contents receivers MUST ignore.
		 */
		goto discard;
	}

	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}
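/*
 * dccp_rcv_established - entry point for packets received on an established
 * connection: check sequence numbers, parse options, feed the Ack Vector and
 * the CCIDs, then hand the packet to __dccp_rcv_established().
 */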
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dccp_check_seqno(sk, skb))
		goto discard;

	if (dccp_parse_options(sk, skb))
		goto discard;

	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_event_ack_recv(sk, skb);

	if (dp->dccps_options.dccpo_send_ack_vector &&
	    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
			    DCCP_SKB_CB(skb)->dccpd_seq,
			    DCCP_ACKVEC_STATE_RECEIVED))
		goto discard;

	ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
	ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

	return __dccp_rcv_established(sk, skb, dh, len);
discard:
	__kfree_skb(skb);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_established);
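/*
 * dccp_rcv_request_sent_state_process - a client in the REQUEST state is
 * waiting for a Response: validate its Acknowledgement Number, initialise
 * the half-connection CCIDs and move to PARTOPEN (Steps 4 and 10).
 */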
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	 */
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);

		/* Stop the REQUEST timer */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		BUG_TRAP(sk->sk_send_head != NULL);
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu \n",
				      (unsigned long long)dp->dccps_awl,
				      (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		if (dp->dccps_options.dccpo_send_ack_vector &&
		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
				    DCCP_SKB_CB(skb)->dccpd_seq,
				    DCCP_ACKVEC_STATE_RECEIVED))
			goto out_invalid_packet; /* FIXME: change error code */

		dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_update_gsr(sk, dp->dccps_isr);
		/*
		 * SWL and AWL are initially adjusted so that they are not less than
		 * the initial Sequence Numbers received and sent, respectively:
		 *	SWL := max(GSR + 1 - floor(W/4), ISR),
		 *	AWL := max(GSS - W' + 1, ISS).
		 * These adjustments MUST be applied only at the beginning of the
		 * connection.
		 *
		 * AWL was adjusted in dccp_v4_connect -acme
		 */
		dccp_set_seqno(&dp->dccps_swl,
			       max48(dp->dccps_swl, dp->dccps_isr));

		if (ccid_hc_rx_init(dp->dccps_hc_rx_ccid, sk) != 0 ||
		    ccid_hc_tx_init(dp->dccps_hc_tx_ccid, sk) != 0) {
			ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
			ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
			/* FIXME: send appropriate RESET code */
			goto out_invalid_packet;
		}

		dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the
		 *	      server (see Step 4), and we should move to
		 *	      PARTOPEN state. PARTOPEN means send an Ack,
		 *	      don't send Data packets, retransmit Acks
		 *	      periodically, and always include any Init Cookie
		 *	      from the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the
		 *	      three-way handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/* Make sure socket is routed, for correct metrics. */
		icsk->icsk_af_ops->rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
		}

		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			/*
			 * OK, in DCCP we can as well do a similar trick, it's
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	/* dccp_v4_do_rcv will send a reset */
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
	return 1;
}
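/*
 * dccp_rcv_respond_partopen_state_process - complete the handshake from the
 * RESPOND or PARTOPEN state: on the first Ack/DataAck record S.OSR, stop the
 * PARTOPEN timer and move to OPEN; Data and DataAck payloads are queued via
 * __dccp_rcv_established().
 */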
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned len)
{
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATA:
		if (sk->sk_state == DCCP_RESPOND)
			break;
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc, for now we're not clearing it, sending an extra
		 * ACK when there is nothing else to do in DELACK is not a big
		 * deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK ||
		    dh->dccph_type == DCCP_PKT_DATA) {
			__dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by __dccp_rcv_established) */
		}
		break;
	}

	return queued;
}
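/*
 * dccp_rcv_state_process - per-state packet processing for sockets that are
 * not yet (or no longer) in the established fast path: LISTEN, REQUEST,
 * RESPOND, PARTOPEN and the common Reset/CloseReq/Close/Sync handling.
 */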
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	const int old_state = sk->sk_state;
	int queued = 0;

	/*
	 *  Step 3: Process LISTEN state
	 *	(Continuing from dccp_v4_do_rcv and dccp_v6_do_rcv)
	 *
	 *     If S.state == LISTEN,
	 *	  If P.type == Request or P contains a valid Init Cookie
	 *		option,
	 *	     * Must scan the packet's options to check for an Init
	 *		Cookie.  Only the Init Cookie is processed here,
	 *		however; other options are processed in Step 8.  This
	 *		scan need only be performed if the endpoint uses Init
	 *		Cookies *
	 *	     * Generate a new socket and switch to that socket *
	 *	     Set S := new socket for this port pair
	 *	     S.state = RESPOND
	 *	     Choose S.ISS (initial seqno) or set from Init Cookie
	 *	     Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *	     Continue with S.state == RESPOND
	 *	     * A Response packet will be generated in Step 11 *
	 *	  Otherwise,
	 *	     Generate Reset(No Connection) unless P.type == Reset
	 *	     Drop packet and return
	 *
	 *  NOTE: the check for the packet types is done in
	 *	  dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
								    skb) < 0)
				return 1;

			/* FIXME: do congestion control initialization */
			goto discard;
		}
		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		/* Caller (dccp_v4_do_rcv) will send Reset */
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	}

	if (sk->sk_state != DCCP_REQUESTING) {
		if (dccp_check_seqno(sk, skb))
			goto discard;

		/*
		 * Step 8: Process options and mark acknowledgeable
		 */
		if (dccp_parse_options(sk, skb))
			goto discard;

		if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
			dccp_event_ack_recv(sk, skb);

		if (dp->dccps_options.dccpo_send_ack_vector &&
		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
				    DCCP_SKB_CB(skb)->dccpd_seq,
				    DCCP_ACKVEC_STATE_RECEIVED))
			goto discard;

		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
	}
	/*
	 *  Step 9: Process Reset
	 *	If P.type == Reset,
	 *		Tear down connection
	 *		S.state := TIMEWAIT
	 *		Set TIMEWAIT timer
	 *		Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_RESET) {
		/*
		 * Queue the equivalent of TCP fin so that dccp_recvmsg
		 * exits the loop
		 */
		dccp_fin(sk, skb);
		dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
		return 0;
		/*
		 *   Step 7: Check for unexpected packet types
		 *      If (S.is_server and P.type == CloseReq)
		 *	    or (S.is_server and P.type == Response)
		 *	    or (S.is_client and P.type == Request)
		 *	    or (S.state == RESPOND and P.type == Data),
		 *	  Send Sync packet acknowledging P.seqno
		 *	  Drop packet and return
		 */
	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
		    (dh->dccph_type == DCCP_PKT_RESPONSE ||
		     dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
		   (dp->dccps_role == DCCP_ROLE_CLIENT &&
		    dh->dccph_type == DCCP_PKT_REQUEST) ||
		   (sk->sk_state == DCCP_RESPOND &&
		    dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
		dccp_rcv_closereq(sk, skb);
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
		dccp_rcv_close(sk, skb);
		return 0;
	}
	if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
		goto discard;
	}

	switch (sk->sk_state) {
	case DCCP_CLOSED:
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;

	case DCCP_REQUESTING:
		/* FIXME: do congestion control initialization */

		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		__kfree_skb(skb);
		return 0;

	case DCCP_RESPOND:
	case DCCP_PARTOPEN:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
								 dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		case DCCP_PARTOPEN:
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
			break;
		}
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return queued;
}

EXPORT_SYMBOL_GPL(dccp_rcv_state_process);