/*
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
        sk->sk_shutdown |= RCV_SHUTDOWN;
        sock_set_flag(sk, SOCK_DONE);
        __skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        skb_set_owner_r(skb, sk);
        sk->sk_data_ready(sk, 0);
}

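/*
 * dccp_rcv_close: the peer sent a DCCP-Close.  Answer with a Reset (code
 * Closed), queue the "fin" equivalent so blocked readers see end-of-file,
 * move the socket to CLOSED and wake poll()ers with POLL_HUP.
 */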
static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
        dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
        dccp_fin(sk, skb);
        dccp_set_state(sk, DCCP_CLOSED);
        sk_wake_async(sk, 1, POLL_HUP);
}

static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
        /*
         *   Step 7: Check for unexpected packet types
         *      If (S.is_server and P.type == CloseReq)
         *         Send Sync packet acknowledging P.seqno
         *         Drop packet and return
         */
        if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
                dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
                return;
        }

        if (sk->sk_state != DCCP_CLOSING)
                dccp_set_state(sk, DCCP_CLOSING);
        dccp_send_close(sk, 0);
}

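/*
 * dccp_event_ack_recv: called whenever a packet carrying an Acknowledgement
 * Number arrives.  If this half-connection keeps an Ack Vector, let it
 * release state for sequence numbers the peer has now acknowledged.
 */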
static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
{
        struct dccp_sock *dp = dccp_sk(sk);

        if (dccp_msk(sk)->dccpms_send_ack_vector)
                dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
                                            DCCP_SKB_CB(skb)->dccpd_ack_seq);
}

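/*
 * dccp_check_seqno: sequence-validity checks of RFC 4340, sec. 7.5 (Steps 5
 * and 6 of the packet processing pseudocode).  Returns 0 if P.seqno/P.ackno
 * lie within the current windows and the packet may be processed further,
 * -1 if it must be dropped (a Sync is sent first where the spec asks for one).
 */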
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        struct dccp_sock *dp = dccp_sk(sk);
        u64 lswl, lawl;

        /*
         *   Step 5: Prepare sequence numbers for Sync
         *     If P.type == Sync or P.type == SyncAck,
         *        If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
         *           / * P is valid, so update sequence number variables
         *               accordingly.  After this update, P will pass the tests
         *               in Step 6.  A SyncAck is generated if necessary in
         *               Step 15 * /
         *           Update S.GSR, S.SWL, S.SWH
         *        Otherwise,
         *           Drop packet and return
         */
        if (dh->dccph_type == DCCP_PKT_SYNC ||
            dh->dccph_type == DCCP_PKT_SYNCACK) {
                if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                              dp->dccps_awl, dp->dccps_awh) &&
                    !before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_swl))
                        dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);
                else
                        return -1;
        }

        /*
         *   Step 6: Check sequence numbers
         *      Let LSWL = S.SWL and LAWL = S.AWL
         *      If P.type == CloseReq or P.type == Close or P.type == Reset,
         *        LSWL := S.GSR + 1, LAWL := S.GAR
         *      If LSWL <= P.seqno <= S.SWH
         *           and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
         *        Update S.GSR, S.SWL, S.SWH
         *        If P.type != Sync,
         *           Update S.GAR
         *      Otherwise,
         *        Send Sync packet acknowledging P.seqno
         *        Drop packet and return
         */
        lswl = dp->dccps_swl;
        lawl = dp->dccps_awl;

        if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
            dh->dccph_type == DCCP_PKT_CLOSE ||
            dh->dccph_type == DCCP_PKT_RESET) {
                lswl = dp->dccps_gsr;
                dccp_inc_seqno(&lswl);
                lawl = dp->dccps_gar;
        }

        if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) &&
            (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ ||
             between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                       lawl, dp->dccps_awh))) {
                dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq);

                if (dh->dccph_type != DCCP_PKT_SYNC &&
                    (DCCP_SKB_CB(skb)->dccpd_ack_seq !=
                     DCCP_PKT_WITHOUT_ACK_SEQ))
                        dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq;
        } else {
                LIMIT_NETDEBUG(KERN_WARNING "DCCP: Step 6 failed for %s packet, "
                               "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
                               "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
                               "sending SYNC...\n",
                               dccp_packet_name(dh->dccph_type),
                               (unsigned long long) lswl,
                               (unsigned long long)
                               DCCP_SKB_CB(skb)->dccpd_seq,
                               (unsigned long long) dp->dccps_swh,
                               (DCCP_SKB_CB(skb)->dccpd_ack_seq ==
                                DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
                               (unsigned long long) lawl,
                               (unsigned long long)
                               DCCP_SKB_CB(skb)->dccpd_ack_seq,
                               (unsigned long long) dp->dccps_awh);
                dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
                return -1;
        }

        return 0;
}

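/*
 * Worked example for the Step 6 window check above (illustrative numbers
 * only): with S.GSR = 100 and Sequence Window W = 100, RFC 4340, sec. 7.5.1
 * gives SWL = S.GSR + 1 - floor(W/4) = 76 and SWH = S.GSR + ceil(3*W/4) = 175,
 * so between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) accepts a
 * seqno of 120 but rejects 300, which instead takes the dccp_send_sync() path.
 */
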
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                  const struct dccp_hdr *dh, const unsigned len)
{
        struct dccp_sock *dp = dccp_sk(sk);

        switch (dccp_hdr(skb)->dccph_type) {
        case DCCP_PKT_DATAACK:
        case DCCP_PKT_DATA:
                /*
                 * FIXME: check if sk_receive_queue is full, schedule
                 * DATA_DROPPED option if it is.
                 */
                __skb_pull(skb, dh->dccph_doff * 4);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                skb_set_owner_r(skb, sk);
                sk->sk_data_ready(sk, 0);
                return 0;
        case DCCP_PKT_ACK:
                goto discard;
        case DCCP_PKT_RESET:
                /*
                 *  Step 9: Process Reset
                 *      If P.type == Reset,
                 *              Tear down connection
                 *              S.state := TIMEWAIT
                 *              Set TIMEWAIT timer
                 *              Drop packet and return
                 */
                dccp_fin(sk, skb);
                dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
                return 0;
        case DCCP_PKT_CLOSEREQ:
                dccp_rcv_closereq(sk, skb);
                goto discard;
        case DCCP_PKT_CLOSE:
                dccp_rcv_close(sk, skb);
                return 0;
        case DCCP_PKT_REQUEST:
                /* Step 7
                 *   or (S.is_server and P.type == Response)
                 *   or (S.is_client and P.type == Request)
                 *   or (S.state >= OPEN and P.type == Request
                 *      and P.seqno >= S.OSR)
                 *   or (S.state >= OPEN and P.type == Response
                 *      and P.seqno >= S.OSR)
                 *   or (S.state == RESPOND and P.type == Data),
                 *  Send Sync packet acknowledging P.seqno
                 *  Drop packet and return
                 */
                if (dp->dccps_role != DCCP_ROLE_LISTEN)
                        goto send_sync;
                goto check_seq;
        case DCCP_PKT_RESPONSE:
                if (dp->dccps_role != DCCP_ROLE_CLIENT)
                        goto send_sync;
check_seq:
                if (!before48(DCCP_SKB_CB(skb)->dccpd_seq, dp->dccps_osr)) {
send_sync:
                        dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
                                       DCCP_PKT_SYNC);
                }
                break;
        case DCCP_PKT_SYNC:
                dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
                               DCCP_PKT_SYNCACK);
                /*
                 * From RFC 4340, sec. 5.7
                 *
                 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
                 * MAY have non-zero-length application data areas, whose
                 * contents receivers MUST ignore.
                 */
                goto discard;
        }

        DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
        __kfree_skb(skb);
        return 0;
}

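/*
 * dccp_rcv_established: receive path for sockets that are already OPEN.
 * Sequence validity (Steps 5/6), option parsing (Step 8), ack bookkeeping
 * and the CCIDs are handled here; per-packet-type processing is then
 * delegated to __dccp_rcv_established() above.
 */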
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
                         const struct dccp_hdr *dh, const unsigned len)
{
        struct dccp_sock *dp = dccp_sk(sk);

        if (dccp_check_seqno(sk, skb))
                goto discard;

        if (dccp_parse_options(sk, skb))
                goto discard;

        if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                dccp_event_ack_recv(sk, skb);

        if (dccp_msk(sk)->dccpms_send_ack_vector &&
            dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
                            DCCP_SKB_CB(skb)->dccpd_seq,
                            DCCP_ACKVEC_STATE_RECEIVED))
                goto discard;

        ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
        ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

        return __dccp_rcv_established(sk, skb, dh, len);
discard:
        __kfree_skb(skb);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_established);

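/*
 * dccp_rcv_request_sent_state_process: the client is in REQUEST state,
 * waiting for a Response to the Request it sent (Steps 4 and 10 of the
 * RFC 4340, sec. 8.5 pseudocode).  A valid Response moves the socket to
 * PARTOPEN; anything else makes the caller send a Reset (Packet Error).
 */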
static int dccp_rcv_request_sent_state_process(struct sock *sk,
                                               struct sk_buff *skb,
                                               const struct dccp_hdr *dh,
                                               const unsigned len)
{
        /*
         *  Step 4: Prepare sequence numbers in REQUEST
         *     If S.state == REQUEST,
         *        If (P.type == Response or P.type == Reset)
         *              and S.AWL <= P.ackno <= S.AWH,
         *           / * Set sequence number variables corresponding to the
         *              other endpoint, so P will pass the tests in Step 6 * /
         *           Set S.GSR, S.ISR, S.SWL, S.SWH
         *           / * Response processing continues in Step 10; Reset
         *              processing continues in Step 9 * /
         */
        if (dh->dccph_type == DCCP_PKT_RESPONSE) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);

                /* Stop the REQUEST timer */
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
                BUG_TRAP(sk->sk_send_head != NULL);
                __kfree_skb(sk->sk_send_head);
                sk->sk_send_head = NULL;

                if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                               dp->dccps_awl, dp->dccps_awh)) {
                        dccp_pr_debug("invalid ackno: S.AWL=%llu, "
                                      "P.ackno=%llu, S.AWH=%llu\n",
                                      (unsigned long long)dp->dccps_awl,
                                      (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
                                      (unsigned long long)dp->dccps_awh);
                        goto out_invalid_packet;
                }

                if (dccp_parse_options(sk, skb))
                        goto out_invalid_packet;

                if (dccp_msk(sk)->dccpms_send_ack_vector &&
                    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
                                    DCCP_SKB_CB(skb)->dccpd_seq,
                                    DCCP_ACKVEC_STATE_RECEIVED))
                        goto out_invalid_packet; /* FIXME: change error code */

                dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
                dccp_update_gsr(sk, dp->dccps_isr);
                /*
                 * SWL and AWL are initially adjusted so that they are not
                 * less than the initial Sequence Numbers received and sent,
                 * respectively:
                 *      SWL := max(GSR + 1 - floor(W/4), ISR),
                 *      AWL := max(GSS - W' + 1, ISS).
                 * These adjustments MUST be applied only at the beginning of
                 * the connection.
                 *
                 * AWL was adjusted in dccp_v4_connect -acme
                 */
                dccp_set_seqno(&dp->dccps_swl,
                               max48(dp->dccps_swl, dp->dccps_isr));

                dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);

                /*
                 *    Step 10: Process REQUEST state (second part)
                 *       If S.state == REQUEST,
                 *        / * If we get here, P is a valid Response from the
                 *            server (see Step 4), and we should move to
                 *            PARTOPEN state.  PARTOPEN means send an Ack,
                 *            don't send Data packets, retransmit Acks
                 *            periodically, and always include any Init Cookie
                 *            from the Response * /
                 *        S.state := PARTOPEN
                 *        Set PARTOPEN timer
                 *        Continue with S.state == PARTOPEN
                 *        / * Step 12 will send the Ack completing the
                 *            three-way handshake * /
                 */
                dccp_set_state(sk, DCCP_PARTOPEN);

                /* Make sure socket is routed, for correct metrics. */
                icsk->icsk_af_ops->rebuild_header(sk);

                if (!sock_flag(sk, SOCK_DEAD)) {
                        sk->sk_state_change(sk);
                        sk_wake_async(sk, 0, POLL_OUT);
                }

                if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
                    icsk->icsk_accept_queue.rskq_defer_accept) {
                        /* Save one ACK.  Data will be ready after
                         * several ticks, if write_pending is set.
                         *
                         * It may be deleted, but with this feature tcpdumps
                         * look so _wonderfully_ clever, that I was not able
                         * to stand against the temptation 8)     --ANK
                         */
                        /*
                         * OK, in DCCP we can as well do a similar trick, it's
                         * even in the draft, but there is no need for us to
                         * schedule an ack here, as dccp_sendmsg does this for
                         * us, also stated in the draft. -acme
                         */
                        __kfree_skb(skb);
                        return 0;
                }
                dccp_send_ack(sk);
                return -1;
        }

out_invalid_packet:
        /* dccp_v4_do_rcv will send a reset */
        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
        return 1;
}

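/*
 * dccp_rcv_respond_partopen_state_process: handles the server in RESPOND and
 * the client in PARTOPEN.  The first valid Ack/DataAck (or Data once in
 * PARTOPEN) completes the handshake: S.OSR is recorded and the socket moves
 * to OPEN.  Returns 1 if the skb was queued by __dccp_rcv_established().
 */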
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
                                                   struct sk_buff *skb,
                                                   const struct dccp_hdr *dh,
                                                   const unsigned len)
{
        int queued = 0;

        switch (dh->dccph_type) {
        case DCCP_PKT_RESET:
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
                break;
        case DCCP_PKT_DATA:
                if (sk->sk_state == DCCP_RESPOND)
                        break;
                /* fall through */
        case DCCP_PKT_DATAACK:
        case DCCP_PKT_ACK:
                /*
                 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
                 * here but only if we haven't used the DELACK timer for
                 * something else, like sending a delayed ack for a TIMESTAMP
                 * echo, etc.  For now we're not clearing it, sending an extra
                 * ACK when there is nothing else to do in DELACK is not a big
                 * deal after all.
                 */

                /* Stop the PARTOPEN timer */
                if (sk->sk_state == DCCP_PARTOPEN)
                        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

                dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
                dccp_set_state(sk, DCCP_OPEN);

                if (dh->dccph_type == DCCP_PKT_DATAACK ||
                    dh->dccph_type == DCCP_PKT_DATA) {
                        __dccp_rcv_established(sk, skb, dh, len);
                        queued = 1; /* packet was queued
                                       (by __dccp_rcv_established) */
                }
                break;
        }

        return queued;
}

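/*
 * dccp_rcv_state_process: the slow path, covering every state except OPEN
 * (LISTEN, REQUEST, RESPOND, PARTOPEN, the closing states).  It walks the
 * relevant steps of the RFC 4340, sec. 8.5 pseudocode and dispatches to the
 * helpers above.  Returns 0 if the packet was consumed, 1 if the caller
 * should send a Reset using dccpd_reset_code.
 */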
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                           struct dccp_hdr *dh, unsigned len)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
        const int old_state = sk->sk_state;
        int queued = 0;

        /*
         *  Step 3: Process LISTEN state
         *      (Continuing from dccp_v4_do_rcv and dccp_v6_do_rcv)
         *
         *     If S.state == LISTEN,
         *       If P.type == Request or P contains a valid Init Cookie option,
         *          * Must scan the packet's options to check for an Init
         *            Cookie.  Only the Init Cookie is processed here,
         *            however; other options are processed in Step 8.  This
         *            scan need only be performed if the endpoint uses Init
         *            Cookies *
         *          * Generate a new socket and switch to that socket *
         *          Set S := new socket for this port pair
         *          S.state = RESPOND
         *          Choose S.ISS (initial seqno) or set from Init Cookie
         *          Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
         *          Continue with S.state == RESPOND
         *          * A Response packet will be generated in Step 11 *
         *       Otherwise,
         *          Generate Reset(No Connection) unless P.type == Reset
         *          Drop packet and return
         *
         * NOTE: the check for the packet types is done in
         *       dccp_rcv_state_process
         */
        if (sk->sk_state == DCCP_LISTEN) {
                if (dh->dccph_type == DCCP_PKT_REQUEST) {
                        if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
                                                                    skb) < 0)
                                return 1;

                        /* FIXME: do congestion control initialization */
                        goto discard;
                }
                if (dh->dccph_type == DCCP_PKT_RESET)
                        goto discard;

                /* Caller (dccp_v4_do_rcv) will send Reset */
                dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
                return 1;
        }

        if (sk->sk_state != DCCP_REQUESTING) {
                if (dccp_check_seqno(sk, skb))
                        goto discard;

                /*
                 * Step 8: Process options and mark acknowledgeable
                 */
                if (dccp_parse_options(sk, skb))
                        goto discard;

                if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                        dccp_event_ack_recv(sk, skb);

                if (dccp_msk(sk)->dccpms_send_ack_vector &&
                    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
                                    DCCP_SKB_CB(skb)->dccpd_seq,
                                    DCCP_ACKVEC_STATE_RECEIVED))
                        goto discard;

                ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
                ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
        }

        /*
         *  Step 9: Process Reset
         *      If P.type == Reset,
         *              Tear down connection
         *              S.state := TIMEWAIT
         *              Set TIMEWAIT timer
         *              Drop packet and return
         */
        if (dh->dccph_type == DCCP_PKT_RESET) {
                /*
                 * Queue the equivalent of TCP fin so that dccp_recvmsg
                 * exits the loop with the socket in DCCP_CLOSED
                 */
                dccp_fin(sk, skb);
                dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
                return 0;
                /*
                 *   Step 7: Check for unexpected packet types
                 *      If (S.is_server and P.type == CloseReq)
                 *          or (S.is_server and P.type == Response)
                 *          or (S.is_client and P.type == Request)
                 *          or (S.state == RESPOND and P.type == Data),
                 *        Send Sync packet acknowledging P.seqno
                 *        Drop packet and return
                 */
        } else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
                    (dh->dccph_type == DCCP_PKT_RESPONSE ||
                     dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
                   (dp->dccps_role == DCCP_ROLE_CLIENT &&
                    dh->dccph_type == DCCP_PKT_REQUEST) ||
                   (sk->sk_state == DCCP_RESPOND &&
                    dh->dccph_type == DCCP_PKT_DATA)) {
                dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
                goto discard;
        } else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
                dccp_rcv_closereq(sk, skb);
                goto discard;
        } else if (dh->dccph_type == DCCP_PKT_CLOSE) {
                dccp_rcv_close(sk, skb);
                return 0;
        }

        if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
                dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
                goto discard;
        }

        switch (sk->sk_state) {
        case DCCP_CLOSED:
                dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
                return 1;

        case DCCP_REQUESTING:
                /* FIXME: do congestion control initialization */

                queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
                if (queued >= 0)
                        return queued;

                __kfree_skb(skb);
                return 0;

        case DCCP_RESPOND:
        case DCCP_PARTOPEN:
                queued = dccp_rcv_respond_partopen_state_process(sk, skb,
                                                                 dh, len);
                break;
        }

        if (dh->dccph_type == DCCP_PKT_ACK ||
            dh->dccph_type == DCCP_PKT_DATAACK) {
                switch (old_state) {
                case DCCP_PARTOPEN:
                        sk->sk_state_change(sk);
                        sk_wake_async(sk, 0, POLL_OUT);
                        break;
                }
        }

        if (!queued) {
discard:
                __kfree_skb(skb);
        }
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_state_process);