/*
 * Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
 *
 * Changes to meet Linux coding standards, and DCCP infrastructure fixes.
 *
 * Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * This implementation should follow RFC 4341
 */
#include <linux/slab.h>
#include "../ccid.h"
#include "../dccp.h"
#include "ccid2.h"
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
static int ccid2_debug;
#define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc)
{
	int len = 0;
	int pipe = 0;
	struct ccid2_seq *seqp = hc->tx_seqh;

	/* there is data in the chain */
	if (seqp != hc->tx_seqt) {
		seqp = seqp->ccid2s_prev;
		len++;
		if (!seqp->ccid2s_acked)
			pipe++;

		while (seqp != hc->tx_seqt) {
			struct ccid2_seq *prev = seqp->ccid2s_prev;

			len++;
			if (!prev->ccid2s_acked)
				pipe++;

			/* packets are sent sequentially */
			BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq,
						prev->ccid2s_seq) >= 0);
			BUG_ON(time_before(seqp->ccid2s_sent,
					   prev->ccid2s_sent));

			seqp = prev;
		}
	}

	BUG_ON(pipe != hc->tx_pipe);
	ccid2_pr_debug("len of chain=%d\n", len);

	do {
		seqp = seqp->ccid2s_prev;
		len++;
	} while (seqp != hc->tx_seqh);

	ccid2_pr_debug("total len=%d\n", len);
	BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN);
}
#else
#define ccid2_pr_debug(format, a...)
#define ccid2_hc_tx_check_sanity(hc)
#endif
static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
{
	struct ccid2_seq *seqp;
	int i;

	/* check if we have space to preserve the pointer to the buffer */
	if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
			       sizeof(struct ccid2_seq *)))
		return -ENOMEM;

	/* allocate buffer and initialize linked list */
	seqp = kmalloc(CCID2_SEQBUF_LEN * sizeof(struct ccid2_seq), gfp_any());
	if (seqp == NULL)
		return -ENOMEM;

	for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
		seqp[i].ccid2s_next = &seqp[i + 1];
		seqp[i + 1].ccid2s_prev = &seqp[i];
	}
	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];

	/* This is the first allocation.  Initiate the head and tail.  */
	if (hc->tx_seqbufc == 0)
		hc->tx_seqh = hc->tx_seqt = seqp;
	else {
		/* link the existing list with the one we just created */
		hc->tx_seqh->ccid2s_next = seqp;
		seqp->ccid2s_prev = hc->tx_seqh;

		hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;
	}

	/* store the original pointer to the buffer so we can free it */
	hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
	hc->tx_seqbufc++;

	return 0;
}
static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if (hc->tx_pipe < hc->tx_cwnd)
		return 0;

	return 1; /* XXX CCID should dequeue when ready instead of polling */
}
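
/*
 * Example of the window check above: with tx_cwnd = 4 and three packets
 * still unacknowledged (tx_pipe = 3), the CCID returns 0 and the packet may
 * be sent; once tx_pipe reaches 4 it returns 1 and the caller has to try
 * again later (see the XXX note above about polling).
 */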
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
{
	struct dccp_sock *dp = dccp_sk(sk);
	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);

	/*
	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
	 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
	 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
	 * The same problem arises when Ack Ratio is 0 (ie. Ack Ratio disabled).
	 */
	if (val == 0 || val > max_ratio) {
		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
		val = max_ratio;
	}
	if (val > DCCPF_ACK_RATIO_MAX)
		val = DCCPF_ACK_RATIO_MAX;

	if (val == dp->dccps_l_ack_ratio)
		return;

	ccid2_pr_debug("changing local ack ratio to %u\n", val);
	dp->dccps_l_ack_ratio = val;
}
static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val)
{
	ccid2_pr_debug("change SRTT to %ld\n", val);
	hc->tx_srtt = val;
}
static void ccid2_start_rto_timer(struct sock *sk);
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	long s;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
		goto out;
	}

	ccid2_pr_debug("RTO_EXPIRE\n");

	ccid2_hc_tx_check_sanity(hc);

	/* back-off timer */
	hc->tx_rto <<= 1;

	s = hc->tx_rto / HZ;
	if (s > 60)
		hc->tx_rto = 60 * HZ;

	ccid2_start_rto_timer(sk);

	/* adjust pipe, cwnd etc */
	hc->tx_ssthresh = hc->tx_cwnd / 2;
	if (hc->tx_ssthresh < 2)
		hc->tx_ssthresh = 2;
	hc->tx_cwnd = 1;
	hc->tx_pipe = 0;

	/* clear state about stuff we sent */
	hc->tx_seqt = hc->tx_seqh;
	hc->tx_packets_acked = 0;

	/* clear ack ratio state. */
	hc->tx_rpseq    = 0;
	hc->tx_rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);
	ccid2_hc_tx_check_sanity(hc);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
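
/*
 * Example of the adjustment above: if cwnd was 8 when the RTO fired,
 * ssthresh becomes 4; if cwnd was 3, the computed value of 1 is raised to
 * the floor of 2 before the window is rebuilt in slow start.
 */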
static void ccid2_start_rto_timer(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto);

	BUG_ON(timer_pending(&hc->tx_rtotimer));
	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
}
static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	struct ccid2_seq *next;

	hc->tx_pipe++;

	hc->tx_seqh->ccid2s_seq   = dp->dccps_gss;
	hc->tx_seqh->ccid2s_acked = 0;
	hc->tx_seqh->ccid2s_sent  = jiffies;

	next = hc->tx_seqh->ccid2s_next;
	/* check if we need to alloc more space */
	if (next == hc->tx_seqt) {
		if (ccid2_hc_tx_alloc_seq(hc)) {
			DCCP_CRIT("packet history - out of memory!");
			/* FIXME: find a more graceful way to bail out */
			return;
		}
		next = hc->tx_seqh->ccid2s_next;
		BUG_ON(next == hc->tx_seqt);
	}
	hc->tx_seqh = next;

	ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);

	/*
	 * FIXME: The code below is broken and the variables have been removed
	 * from the socket struct. The `ackloss' variable was always set to 0,
	 * and with arsent there are several problems:
	 *  (i) it doesn't just count the number of Acks, but all sent packets;
	 *  (ii) it is expressed in # of packets, not # of windows, so the
	 *  comparison below uses the wrong formula: Appendix A of RFC 4341
	 *  comes up with the number K = cwnd / (R^2 - R) of consecutive windows
	 *  of data with no lost or marked Ack packets. If arsent were the # of
	 *  consecutive Acks received without loss, then Ack Ratio needs to be
	 *  decreased by 1 when
	 *	arsent >=  K * cwnd / R  =  cwnd^2 / (R^3 - R^2)
	 *  where cwnd / R is the number of Acks received per window of data
	 *  (cf. RFC 4341, App. A). The problems are that
	 *  - arsent counts other packets as well;
	 *  - the comparison uses a formula different from RFC 4341;
	 *  - computing a cubic/quadratic equation each time is too complicated.
	 *  Hence a different algorithm is needed.
	 */
#if 0
	/* Ack Ratio.  Need to maintain a concept of how many windows we sent */
	hc->tx_arsent++;
	/* We had an ack loss in this window... */
	if (hc->tx_ackloss) {
		if (hc->tx_arsent >= hc->tx_cwnd) {
			hc->tx_arsent  = 0;
			hc->tx_ackloss = 0;
		}
	} else {
		/* No acks lost up to now... */
		/* decrease ack ratio if enough packets were sent */
		if (dp->dccps_l_ack_ratio > 1) {
			/* XXX don't calculate denominator each time */
			int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
				    dp->dccps_l_ack_ratio;

			denom = hc->tx_cwnd * hc->tx_cwnd / denom;

			if (hc->tx_arsent >= denom) {
				ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
				hc->tx_arsent = 0;
			}
		} else {
			/* we can't increase ack ratio further [1] */
			hc->tx_arsent = 0; /* or maybe set it to cwnd*/
		}
	}
#endif
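
	/*
	 * Worked example of the RFC 4341, App. A formula quoted in the FIXME
	 * above: with cwnd = 20 and Ack Ratio R = 2, K = cwnd / (R^2 - R) =
	 * 20 / 2 = 10 windows, so the ratio could only be lowered after
	 * K * cwnd / R = cwnd^2 / (R^3 - R^2) = 400 / 4 = 100 consecutive
	 * Acks received without loss or marking; the disabled code above
	 * never counts such a run correctly.
	 */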
	/* setup RTO timer */
	if (!timer_pending(&hc->tx_rtotimer))
		ccid2_start_rto_timer(sk);

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
	do {
		struct ccid2_seq *seqp = hc->tx_seqt;

		while (seqp != hc->tx_seqh) {
			ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
				       (unsigned long long)seqp->ccid2s_seq,
				       seqp->ccid2s_acked, seqp->ccid2s_sent);
			seqp = seqp->ccid2s_next;
		}
	} while (0);
	ccid2_pr_debug("=========\n");
	ccid2_hc_tx_check_sanity(hc);
#endif
}
/* XXX Lame code duplication!
 * returns -1 if none was found.
 * else returns the next offset to use in the function call.
 */
static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
			   unsigned char **vec, unsigned char *veclen)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
	unsigned char *opt_ptr;
	const unsigned char *opt_end = (unsigned char *)dh +
					(dh->dccph_doff * 4);
	unsigned char opt, len;
	unsigned char *value;

	BUG_ON(offset < 0);
	options += offset;
	opt_ptr = options;
	if (opt_ptr >= opt_end)
		return -1;

	while (opt_ptr != opt_end) {
		opt   = *opt_ptr++;
		len   = 0;
		value = NULL;

		/* Check if this isn't a single byte option */
		if (opt > DCCPO_MAX_RESERVED) {
			if (opt_ptr == opt_end)
				goto out_invalid_option;

			len = *opt_ptr++;
			if (len < 3)
				goto out_invalid_option;
			/*
			 * Remove the type and len fields, leaving
			 * just the value size
			 */
			len     -= 2;
			value   = opt_ptr;
			opt_ptr += len;

			if (opt_ptr > opt_end)
				goto out_invalid_option;
		}

		switch (opt) {
		case DCCPO_ACK_VECTOR_0:
		case DCCPO_ACK_VECTOR_1:
			*vec	= value;
			*veclen = len;
			return offset + (opt_ptr - options);
		}
	}

	return -1;

out_invalid_option:
	DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
	return -1;
}
static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	sk_stop_timer(sk, &hc->tx_rtotimer);
	ccid2_pr_debug("deleted RTO timer\n");
}
static inline void ccid2_new_ack(struct sock *sk,
				 struct ccid2_seq *seqp,
				 unsigned int *maxincr)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if (hc->tx_cwnd < hc->tx_ssthresh) {
		if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
			hc->tx_cwnd += 1;
			*maxincr    -= 1;
			hc->tx_packets_acked = 0;
		}
	} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
			hc->tx_cwnd += 1;
			hc->tx_packets_acked = 0;
	}

	/* update the RTO estimate; the rules differ for the first
	 * measurement and for subsequent ones.
	 */
	if (hc->tx_srtt == -1 ||
	    time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) {
		unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
		int s;

		/* first measurement */
		if (hc->tx_srtt == -1) {
			ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
				       r, jiffies,
				       (unsigned long long)seqp->ccid2s_seq);
			ccid2_change_srtt(hc, r);
			hc->tx_rttvar = r >> 1;
		} else {
			/* RTTVAR */
			long tmp = hc->tx_srtt - r;
			long srtt;

			if (tmp < 0)
				tmp *= -1;

			tmp >>= 2;
			hc->tx_rttvar *= 3;
			hc->tx_rttvar >>= 2;
			hc->tx_rttvar += tmp;

			/* SRTT */
			srtt = hc->tx_srtt;
			srtt *= 7;
			srtt >>= 3;
			tmp = r >> 3;
			srtt += tmp;
			ccid2_change_srtt(hc, srtt);
		}
		s = hc->tx_rttvar << 2;
		/* clock granularity is 1 when based on jiffies */
		if (!s)
			s = 1;
		hc->tx_rto = hc->tx_srtt + s;

		/* must be at least a second */
		s = hc->tx_rto / HZ;
		/* DCCP doesn't require this [but I like it cuz my code sux] */
		if (s < 1)
			hc->tx_rto = HZ;
		/* max 60 seconds */
		if (s > 60)
			hc->tx_rto = HZ * 60;

		hc->tx_lastrtt = jiffies;

		ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
			       hc->tx_srtt, hc->tx_rttvar,
			       hc->tx_rto, HZ, r);
	}

	/* we got a new ack, so re-start RTO timer */
	ccid2_hc_tx_kill_rto_timer(sk);
	ccid2_start_rto_timer(sk);
}
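
/*
 * Example of the RTO computation above, for the first-sample case: a first
 * RTT measurement of r jiffies gives srtt = r and rttvar = r / 2, hence
 * rto = srtt + 4 * rttvar = 3 * r, which is then kept between roughly one
 * second and 60 seconds before the timer is re-armed.
 */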
static void ccid2_hc_tx_dec_pipe(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if (hc->tx_pipe == 0)
		DCCP_BUG("pipe == 0");
	else
		hc->tx_pipe--;

	if (hc->tx_pipe == 0)
		ccid2_hc_tx_kill_rto_timer(sk);
}
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) {
		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
		return;
	}

	hc->tx_last_cong = jiffies;

	hc->tx_cwnd     = hc->tx_cwnd / 2 ? : 1U;
	hc->tx_ssthresh = max(hc->tx_cwnd, 2U);

	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
	if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
}
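
/*
 * Example of the halving above: cwnd = 10 becomes 5 with ssthresh = 5;
 * cwnd = 1 stays at 1 (the "?:" keeps it from reaching 0) while ssthresh
 * is pinned at its minimum of 2.
 */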
static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	u64 ackno, seqno;
	struct ccid2_seq *seqp;
	unsigned char *vector;
	unsigned char veclen;
	int offset = 0;
	int done = 0;
	unsigned int maxincr = 0;

	ccid2_hc_tx_check_sanity(hc);
	/* check reverse path congestion */
	seqno = DCCP_SKB_CB(skb)->dccpd_seq;

	/* XXX this whole "algorithm" is broken.  Need to fix it to keep track
	 * of the seqnos of the dupacks so that rpseq and rpdupack are correct
	 * -sorbo.
	 */
	/* need to bootstrap */
	if (hc->tx_rpdupack == -1) {
		hc->tx_rpdupack = 0;
		hc->tx_rpseq    = seqno;
	} else {
		/* check if packet is consecutive */
		if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
			hc->tx_rpseq = seqno;
		/* it's a later packet */
		else if (after48(seqno, hc->tx_rpseq)) {
			hc->tx_rpdupack++;

			/* check if we got enough dupacks */
			if (hc->tx_rpdupack >= NUMDUPACK) {
				hc->tx_rpdupack = -1; /* XXX lame */
				hc->tx_rpseq    = 0;

				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
			}
		}
	}

	/* check forward path congestion */
	/* still didn't send out new data packets */
	if (hc->tx_seqh == hc->tx_seqt)
		return;

	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_ACK:
	case DCCP_PKT_DATAACK:
		break;
	default:
		return;
	}

	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	if (after48(ackno, hc->tx_high_ack))
		hc->tx_high_ack = ackno;

	seqp = hc->tx_seqt;
	while (before48(seqp->ccid2s_seq, ackno)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;
			break;
		}
	}

	/*
	 * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
	 * packets per acknowledgement. Rounding up avoids that cwnd is not
	 * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
	 */
	if (hc->tx_cwnd < hc->tx_ssthresh)
		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
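
	/*
	 * Example of the rounding above: with Ack Ratio 1,
	 * DIV_ROUND_UP(1, 2) = 1, so cwnd can still grow by one packet per
	 * Ack in slow start; with Ack Ratio 4 the cap is 2 packets per
	 * acknowledgement.
	 */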
	/* go through all ack vectors */
	while ((offset = ccid2_ackvector(sk, skb, offset,
					 &vector, &veclen)) != -1) {
		/* go through this ack vector */
		while (veclen--) {
			const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
			u64 ackno_end_rl = SUB48(ackno, rl);

			ccid2_pr_debug("ackvec start:%llu end:%llu\n",
				       (unsigned long long)ackno,
				       (unsigned long long)ackno_end_rl);
			/* if the seqno we are analyzing is larger than the
			 * current ackno, then move towards the tail of our
			 * seqnos.
			 */
			while (after48(seqp->ccid2s_seq, ackno)) {
				if (seqp == hc->tx_seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			/* check all seqnos in the range of the vector
			 * run length
			 */
			while (between48(seqp->ccid2s_seq, ackno_end_rl, ackno)) {
				const u8 state = *vector &
						 DCCP_ACKVEC_STATE_MASK;

				/* new packet received or marked */
				if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
				    !seqp->ccid2s_acked) {
					if (state ==
					    DCCP_ACKVEC_STATE_ECN_MARKED) {
						ccid2_congestion_event(sk,
								       seqp);
					} else
						ccid2_new_ack(sk, seqp,
							      &maxincr);

					seqp->ccid2s_acked = 1;
					ccid2_pr_debug("Got ack for %llu\n",
						       (unsigned long long)seqp->ccid2s_seq);
					ccid2_hc_tx_dec_pipe(sk);
				}
				if (seqp == hc->tx_seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			ackno = SUB48(ackno_end_rl, 1);
			vector++;
		}
		if (done)
			break;
	}

	/* The state about what is acked should be correct now
	 * Check for NUMDUPACK
	 */
	seqp = hc->tx_seqt;
	while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;
			break;
		}
	}
	done = 0;
	while (1) {
		if (seqp->ccid2s_acked) {
			done++;
			if (done == NUMDUPACK)
				break;
		}
		if (seqp == hc->tx_seqt)
			break;
		seqp = seqp->ccid2s_prev;
	}

	/* If there are at least 3 acknowledgements, anything unacknowledged
	 * below the last sequence number is considered lost
	 */
	if (done == NUMDUPACK) {
		struct ccid2_seq *last_acked = seqp;

		/* check for lost packets */
		while (1) {
			if (!seqp->ccid2s_acked) {
				ccid2_pr_debug("Packet lost: %llu\n",
					       (unsigned long long)seqp->ccid2s_seq);
				/* XXX need to traverse from tail -> head in
				 * order to detect multiple congestion events in
				 * one ack vector.
				 */
				ccid2_congestion_event(sk, seqp);
				ccid2_hc_tx_dec_pipe(sk);
			}
			if (seqp == hc->tx_seqt)
				break;
			seqp = seqp->ccid2s_prev;
		}

		hc->tx_seqt = last_acked;
	}

	/* trim acked packets in tail */
	while (hc->tx_seqt != hc->tx_seqh) {
		if (!hc->tx_seqt->ccid2s_acked)
			break;

		hc->tx_seqt = hc->tx_seqt->ccid2s_next;
	}

	ccid2_hc_tx_check_sanity(hc);
}
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 max_ratio;

	/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
	hc->tx_ssthresh = ~0U;

	/*
	 * RFC 4341, 5: "The cwnd parameter is initialized to at most four
	 * packets for new connections, following the rules from [RFC3390]".
	 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
	 */
	hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
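
	/*
	 * Example of the conversion above: for an MSS of 1460 bytes this
	 * yields 4380 / 1460 = 3 packets; very small MSS values are capped
	 * at 4 packets and very large ones never fall below 2, per the
	 * clamp() bounds.
	 */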
	/* Make sure that Ack Ratio is enabled and within bounds. */
	max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
	if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
		dp->dccps_l_ack_ratio = max_ratio;

	/* XXX init ~ to window size... */
	if (ccid2_hc_tx_alloc_seq(hc))
		return -ENOMEM;

	hc->tx_rto       = 3 * HZ;
	ccid2_change_srtt(hc, -1);
	hc->tx_rttvar    = -1;
	hc->tx_rpdupack  = -1;
	hc->tx_last_cong = jiffies;
	setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
			(unsigned long)sk);

	ccid2_hc_tx_check_sanity(hc);
	return 0;
}
static void ccid2_hc_tx_exit(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	int i;

	ccid2_hc_tx_kill_rto_timer(sk);

	for (i = 0; i < hc->tx_seqbufc; i++)
		kfree(hc->tx_seqbuf[i]);
	hc->tx_seqbufc = 0;
}
static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);

	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_DATA:
	case DCCP_PKT_DATAACK:
		hc->rx_data++;
		if (hc->rx_data >= dp->dccps_r_ack_ratio) {
			dccp_send_ack(sk);
			hc->rx_data = 0;
		}
		break;
	}
}
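
/*
 * Example of the receiver-side counting above: with dccps_r_ack_ratio = 2,
 * every second Data/DataAck packet triggers an Ack and resets the rx_data
 * counter.
 */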
struct ccid_operations ccid2_ops = {
	.ccid_id		  = DCCPC_CCID2,
	.ccid_name		  = "TCP-like",
	.ccid_hc_tx_obj_size	  = sizeof(struct ccid2_hc_tx_sock),
	.ccid_hc_tx_init	  = ccid2_hc_tx_init,
	.ccid_hc_tx_exit	  = ccid2_hc_tx_exit,
	.ccid_hc_tx_send_packet	  = ccid2_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	  = ccid2_hc_tx_packet_sent,
	.ccid_hc_tx_packet_recv	  = ccid2_hc_tx_packet_recv,
	.ccid_hc_rx_obj_size	  = sizeof(struct ccid2_hc_rx_sock),
	.ccid_hc_rx_packet_recv	  = ccid2_hc_rx_packet_recv,
};
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
module_param(ccid2_debug, bool, 0644);
MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
#endif