qos fixes, reset bufferusage on conn reset, conn refcnt fix
[cor_2_6_31.git] net/cor/kpacket_gen.c

/*
 * Connection oriented routing
 * Copyright (C) 2007-2010 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
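
/*
 * Generation of kernel (control) packets: control messages (acks, pings,
 * pongs, connect/reset, conn data, credits) are queued per neighbor, packed
 * into kernel packets up to the neighbor's mss and retransmitted until the
 * packet carrying them is acked.
 */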
#include <asm/byteorder.h>

#include "cor.h"

/* not sent over the network - internal meaning only */
#define MSGTYPE_PONG 1
#define MSGTYPE_ACK 2
#define MSGTYPE_ACK_CONN 3
#define MSGTYPE_ACK_CONN_OOO 4
#define MSGTYPE_CONNECT 5
#define MSGTYPE_CONNECT_SUCCESS 6
#define MSGTYPE_RESET_CONN 7
#define MSGTYPE_CONNDATA 8
#define MSGTYPE_PING_CONN 9
#define MSGTYPE_CONNID_UNKNOWN 10
#define MSGTYPE_PING_ALL_CONNS 11
#define MSGTYPE_SET_MAX_CMSG_DELAY 12
#define MSGTYPE_SET_CREDITS 13
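
/*
 * One queued outgoing control message. lh links it either into the
 * neighbor's control_msgs_out/ucontrol_msgs_out queue or, once it has been
 * packed into a packet, into a control_retrans' msgs list. The msg union
 * carries the payload selected by type (MSGTYPE_*).
 */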
struct control_msg_out{
	struct list_head lh; /* either neighbor or control_retrans_packet */
	struct neighbor *nb;

	__u32 length;

	__u8 type;
	union{
		struct{
			__u32 cookie;
			unsigned long time_enqueued; /* jiffies */
			int sent;
		}pong;

		struct{
			__u32 seqno;
		}ack;

		struct{
			struct conn *rconn;
			__u32 conn_id;
			__u32 seqno;
		}ack_conn;

		struct{
			struct conn *rconn;
			__u32 conn_id;
			__u32 seqno;
			__u32 seqno_ooo;
			__u32 length;
		}ack_conn_ooo;

		struct{
			__u32 conn_id;
			__u32 init_seqno;
			struct conn *sconn;
		}connect;

		struct{
			__u32 rcvd_conn_id;
			__u32 gen_conn_id;
			__u32 init_seqno;
			struct conn *rconn;
		}connect_success;

		struct{
			__u32 conn_id;
		}reset;

		struct{
			__u32 conn_id;
			__u32 seqno;
			char *data_orig;
			char *data;
			__u32 datalen;
		}conn_data;

		struct{
			__u32 conn_id;
		}ping_conn;

		struct{
			__u32 conn_id;
		}connid_unknown;

		struct{
			__u32 delay;
		}set_max_cmsg_delay;
	}msg;
};
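
/*
 * Bookkeeping for one sent kernel packet: the retransmittable messages it
 * carried stay on msgs until the packet is acked (kern_ack_rcvd) or its
 * timeout expires (retransmit_timerfunc). Entries are kept in the
 * retransmits htable, keyed by neighbor and seqno.
 */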
struct control_retrans {
	struct kref ref;

	struct neighbor *nb;
	__u32 seqno;

	unsigned long timeout;

	struct list_head msgs;

	struct htab_entry htab_entry;
	struct list_head timeout_list;
};

struct kmem_cache *controlmsg_slab;
struct kmem_cache *controlretrans_slab;

static struct htable retransmits;

atomic_t cmcnt = ATOMIC_INIT(0);

static void add_control_msg(struct control_msg_out *msg, int retrans);

static inline int isurgent(struct control_msg_out *cm)
{
	if (unlikely(cm->type == MSGTYPE_PONG || cm->type == MSGTYPE_ACK))
		return 1;
	return 0;
}

static struct control_msg_out *__alloc_control_msg(void)
{
	struct control_msg_out *cm = kmem_cache_alloc(controlmsg_slab,
			GFP_KERNEL);
	if (unlikely(cm == 0))
		return 0;
	cm->lh.next = LIST_POISON1;
	cm->lh.prev = LIST_POISON2;
	return cm;
}
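
/*
 * Scale a message-count limit by allocation priority: LOW gets about half,
 * MED about three quarters, HIGH the full limit (integer arithmetic,
 * rounded up); e.g. limit == 8 gives 4, 6 and 8.
 */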
static int calc_limit(int limit, int priority)
{
	if (priority == ACM_PRIORITY_LOW)
		return (limit+1)/2;
	else if (priority == ACM_PRIORITY_MED)
		return (limit*3 + 3)/4;
	else if (priority == ACM_PRIORITY_HIGH)
		return limit;
	else
		BUG();
}
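
/*
 * Allocate a control message. Urgent messages (pongs and acks) bypass the
 * counters; everything else is accounted against the per-neighbor and the
 * global budget, scaled by priority via calc_limit(), and the allocation
 * fails once those budgets are exceeded.
 */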
static struct control_msg_out *_alloc_control_msg(struct neighbor *nb,
		int priority, int urgent)
{
	struct control_msg_out *cm = 0;

	BUG_ON(nb == 0);

	if (urgent == 0) {
		long packets1 = atomic_inc_return(&(nb->cmcnt));
		long packets2 = atomic_inc_return(&(cmcnt));

		BUG_ON(packets1 <= 0);
		BUG_ON(packets2 <= 0);

		if (packets1 <= calc_limit(GUARANTEED_CMSGS_PER_NEIGH,
				priority))
			goto alloc;

		if (unlikely(unlikely(packets2 > calc_limit(MAX_CMSGS_PER_NEIGH,
				priority)) || unlikely(packets1 > (
				calc_limit(MAX_CMSGS_PER_NEIGH, priority) *
				(MAX_CMSGS - packets2) / MAX_CMSGS))))
			goto full;
	}

alloc:
	cm = __alloc_control_msg();
	if (unlikely(cm == 0))
		goto full;
	cm->nb = nb;

	if (0) {
full:
		if (urgent == 0) {
			atomic_dec(&(nb->cmcnt));
			atomic_dec(&(cmcnt));
		}
	}
	return cm;
}

struct control_msg_out *alloc_control_msg(struct neighbor *nb, int priority)
{
	return _alloc_control_msg(nb, priority, 0);
}

void free_control_msg(struct control_msg_out *cm)
{
	if (isurgent(cm) == 0) {
		atomic_dec(&(cm->nb->cmcnt));
		atomic_dec(&(cmcnt));
	}

	if (cm->type == MSGTYPE_ACK_CONN) {
		BUG_ON(cm->msg.ack_conn.rconn == 0);
		kref_put(&(cm->msg.ack_conn.rconn->ref), free_conn);
		cm->msg.ack_conn.rconn = 0;
	} else if (cm->type == MSGTYPE_ACK_CONN_OOO) {
		BUG_ON(cm->msg.ack_conn_ooo.rconn == 0);
		kref_put(&(cm->msg.ack_conn_ooo.rconn->ref), free_conn);
		cm->msg.ack_conn_ooo.rconn = 0;
	} else if (cm->type == MSGTYPE_CONNECT) {
		BUG_ON(cm->msg.connect.sconn == 0);
		kref_put(&(cm->msg.connect.sconn->ref), free_conn);
		cm->msg.connect.sconn = 0;
	} else if (cm->type == MSGTYPE_CONNECT_SUCCESS) {
		BUG_ON(cm->msg.connect_success.rconn == 0);
		kref_put(&(cm->msg.connect_success.rconn->ref), free_conn);
		cm->msg.connect_success.rconn = 0;
	}

	kmem_cache_free(controlmsg_slab, cm);
}

static void free_control_retrans(struct kref *ref)
{
	struct control_retrans *cr = container_of(ref, struct control_retrans,
			ref);

	while (list_empty(&(cr->msgs)) == 0) {
		struct control_msg_out *cm = container_of(cr->msgs.next,
				struct control_msg_out, lh);
		list_del(&(cm->lh));
		free_control_msg(cm);
	}

	kmem_cache_free(controlretrans_slab, cr);
}
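
/*
 * Retransmit entries are looked up by (neighbor, kernel packet seqno);
 * rm_to_key() folds the neighbor pointer and the seqno into the hash key.
 */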
struct retransmit_matchparam {
	struct neighbor *nb;
	__u32 seqno;
};

static __u32 rm_to_key(struct retransmit_matchparam *rm)
{
	return ((__u32)((long) rm->nb)) ^ rm->seqno;
}

static void set_retrans_timeout(struct control_retrans *cr, struct neighbor *nb)
{
	cr->timeout = jiffies + usecs_to_jiffies(100000 +
			((__u32) atomic_read(&(nb->latency))) * 2 +
			((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
}
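
/*
 * Work item for the per-neighbor retransmit timer: expired control_retrans
 * entries are removed from the htable and their messages are requeued via
 * add_control_msg(..., 1); entries that are not yet due reschedule the
 * timer. If the neighbor has been killed, pending entries are only dropped.
 */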
void retransmit_timerfunc(struct work_struct *work)
{
	unsigned long iflags;

	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer);

	int nbstate;
	int nbput = 0;

	spin_lock_irqsave( &(nb->state_lock), iflags );
	nbstate = nb->state;
	spin_unlock_irqrestore( &(nb->state_lock), iflags );

	while (1) {
		struct control_retrans *cr = 0;
		struct retransmit_matchparam rm;

		spin_lock_irqsave( &(nb->retrans_lock), iflags );

		if (list_empty(&(nb->retrans_list))) {
			nb->retrans_timer_running = 0;
			nbput = 1;
			break;
		}

		cr = container_of(nb->retrans_list.next,
				struct control_retrans, timeout_list);

		BUG_ON(cr->nb != nb);

		rm.seqno = cr->seqno;
		rm.nb = nb;

		list_del(&(cr->timeout_list));

		if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
			spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

			htable_delete(&retransmits, rm_to_key(&rm), &rm,
					free_control_retrans);
			kref_put(&(cr->ref), free_control_retrans);
			continue;
		}

		if (time_after(cr->timeout, jiffies)) {
			list_add(&(cr->timeout_list), &(nb->retrans_list));
			schedule_delayed_work(&(nb->retrans_timer),
					cr->timeout - jiffies);
			break;
		}

		if (unlikely(htable_delete(&retransmits, rm_to_key(&rm), &rm,
				free_control_retrans)))
			BUG();

		spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

		while (list_empty(&(cr->msgs)) == 0) {
			struct control_msg_out *cm = container_of(cr->msgs.next,
					struct control_msg_out, lh);
			list_del(&(cm->lh));
			add_control_msg(cm, 1);
		}

		kref_put(&(cr->ref), free_control_retrans);
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	if (nbput)
		kref_put(&(nb->ref), neighbor_free);
}

static void schedule_retransmit(struct control_retrans *cr, struct neighbor *nb)
{
	unsigned long iflags;

	struct retransmit_matchparam rm;
	int first;

	rm.seqno = cr->seqno;
	rm.nb = nb;

	set_retrans_timeout(cr, nb);

	spin_lock_irqsave( &(nb->retrans_lock), iflags );
	htable_insert(&retransmits, (char *) cr, rm_to_key(&rm));
	first = list_empty(&(nb->retrans_list));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list));

	if (first && nb->retrans_timer_running == 0) {
		schedule_delayed_work(&(nb->retrans_timer),
				cr->timeout - jiffies);
		nb->retrans_timer_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
}
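
/*
 * Called when a KP_ACK for the kernel packet with this seqno arrives: the
 * matching control_retrans is removed from the htable and from the timeout
 * list, and both references on it are dropped.
 */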
void kern_ack_rcvd(struct neighbor *nb, __u32 seqno)
{
	unsigned long iflags;

	struct control_retrans *cr = 0;
	struct retransmit_matchparam rm;

	rm.seqno = seqno;
	rm.nb = nb;

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	cr = (struct control_retrans *) htable_get(&retransmits, rm_to_key(&rm),
			&rm);

	if (cr == 0) {
		printk(KERN_ERR "bogus/duplicate ack received");
		goto out;
	}

	if (unlikely(htable_delete(&retransmits, rm_to_key(&rm), &rm,
			free_control_retrans)))
		BUG();

	BUG_ON(cr->nb != nb);

	list_del(&(cr->timeout_list));

out:
	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	if (cr != 0) {
		kref_put(&(cr->ref), free_control_retrans); /* htable_get */
		kref_put(&(cr->ref), free_control_retrans); /* list */
	}
}

static void padding(struct sk_buff *skb, int length)
{
	char *dst;
	if (length <= 0)
		return;
	dst = skb_put(skb, length);
	BUG_ON(dst == 0);
	memset(dst, KP_PADDING, length);
}
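
/*
 * The add_*() helpers below append one message to the packet being built:
 * they check spaceleft, reserve room with skb_put(), write the type byte
 * plus the fields (the last argument of put_u32()/put_u16() presumably
 * selects byte-order conversion - an assumption, see cor.h), and queue the
 * message on cr->msgs for retransmission (plain acks are freed instead).
 * They return the number of bytes consumed, or 0 if the message did not fit.
 */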
static int add_ack(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_ACK;
	put_u32(dst + 1, cm->msg.ack.seqno, 1);

	atomic_dec(&(cm->nb->ucmcnt));
	free_control_msg(cm);

	return 5;
}

static int add_ack_conn(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 10))
		return 0;

	dst = skb_put(skb, 10);
	BUG_ON(dst == 0);

	dst[0] = KP_ACK_CONN;
	put_u32(dst + 1, cm->msg.ack_conn.conn_id, 1);
	put_u32(dst + 5, cm->msg.ack_conn.seqno, 1);
	BUG_ON(cm->msg.ack_conn.rconn == 0);
	dst[9] = enc_window(get_window(cm->msg.ack_conn.rconn));

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 10;
}

static int add_ack_conn_ooo(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 18))
		return 0;

	dst = skb_put(skb, 18);
	BUG_ON(dst == 0);

	dst[0] = KP_ACK_CONN_OOO;
	put_u32(dst + 1, cm->msg.ack_conn_ooo.conn_id, 1);
	put_u32(dst + 5, cm->msg.ack_conn_ooo.seqno, 1);
	BUG_ON(cm->msg.ack_conn_ooo.rconn == 0);
	dst[9] = enc_window(get_window(cm->msg.ack_conn_ooo.rconn));
	put_u32(dst + 10, cm->msg.ack_conn_ooo.seqno_ooo, 1);
	put_u32(dst + 14, cm->msg.ack_conn_ooo.length, 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 18;
}

static int add_ping(struct sk_buff *skb, __u32 cookie,
		int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_PING;
	put_u32(dst + 1, cookie, 0);

	return 5;
}

static int add_pong(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 9))
		return 0;

	dst = skb_put(skb, 9);
	BUG_ON(dst == 0);

	dst[0] = KP_PONG;
	put_u32(dst + 1, cm->msg.pong.cookie, 0);
	put_u32(dst + 5, 1000 * jiffies_to_msecs(jiffies -
			cm->msg.pong.time_enqueued), 1);

	atomic_dec(&(cm->nb->ucmcnt));
	list_add_tail(&(cm->lh), &(cr->msgs));

	return 9;
}

static int add_connect(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 10))
		return 0;

	dst = skb_put(skb, 10);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNECT;
	put_u32(dst + 1, cm->msg.connect.conn_id, 1);
	put_u32(dst + 5, cm->msg.connect.init_seqno, 1);
	BUG_ON(cm->msg.connect.sconn == 0);
	dst[9] = enc_window(get_window(cm->msg.connect.sconn));

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 10;
}

static int add_connect_success(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 14))
		return 0;

	dst = skb_put(skb, 14);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNECT_SUCCESS;
	put_u32(dst + 1, cm->msg.connect_success.rcvd_conn_id, 1);
	put_u32(dst + 5, cm->msg.connect_success.gen_conn_id, 1);
	put_u32(dst + 9, cm->msg.connect_success.init_seqno, 1);
	BUG_ON(cm->msg.connect_success.rconn == 0);
	dst[13] = enc_window(get_window(cm->msg.connect_success.rconn));

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 14;
}

static int add_reset_conn(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_RESET_CONN;
	put_u32(dst + 1, cm->msg.reset.conn_id, 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 5;
}
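
/*
 * Connection data may be larger than the remaining packet space. In that
 * case only the first dataputlen bytes are copied and the message is handed
 * back through *split_conndata / *sc_sendlen so the caller can send the
 * rest in a later packet.
 */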
static int add_conndata(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	char *dst;

	int totallen = cm->msg.conn_data.datalen + 11;
	int putlen = min(totallen, spaceleft);
	int dataputlen = putlen - 11;

	BUG_ON(split_conndata == 0);
	BUG_ON(sc_sendlen == 0);

	if (dataputlen < 1 || (spaceleft < 25 && spaceleft < totallen))
		return 0;

	dst = skb_put(skb, putlen);
	BUG_ON(dst == 0);

	dst[0] = KP_CONN_DATA;
	put_u32(dst + 1, cm->msg.conn_data.conn_id, 1);
	put_u32(dst + 5, cm->msg.conn_data.seqno, 1);
	put_u16(dst + 9, dataputlen, 1);

	memcpy(dst + 11, cm->msg.conn_data.data, dataputlen);

	if (cm->msg.conn_data.datalen == dataputlen) {
		list_add_tail(&(cm->lh), &(cr->msgs));
	} else {
		*split_conndata = cm;
		*sc_sendlen = dataputlen;
	}

	return putlen;
}

static int add_ping_conn(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_PING_CONN;
	put_u32(dst + 1, cm->msg.ping_conn.conn_id, 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 5;
}

static int add_connid_unknown(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNID_UNKNOWN;
	put_u32(dst + 1, cm->msg.connid_unknown.conn_id, 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 5;
}

static int add_ping_all_conns(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 1))
		return 0;

	dst = skb_put(skb, 1);
	BUG_ON(dst == 0);

	dst[0] = KP_PING_ALL_CONNS;

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 1;
}

static int add_set_max_cmsg_dly(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_SET_MAX_CMSG_DELAY;
	put_u32(dst + 1, cm->msg.set_max_cmsg_delay.delay, 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 5;
}

static int add_credits(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	unsigned long iflags;
	char *dst;

	if (unlikely(spaceleft < 21))
		return 0;

	dst = skb_put(skb, 21);
	BUG_ON(dst == 0);

	dst[0] = KP_SET_CREDITS;

	spin_lock_irqsave( &(cm->nb->credits_lock), iflags );

	refresh_credits_state(cm->nb);

	put_u64(dst + 1, cm->nb->debits, 1);
	put_u32(dst + 9, cm->nb->debitrate_initial +
			cm->nb->debitrate_initial_adj, 1);
	put_u32(dst + 13, cm->nb->debitrate_earning, 1);
	put_u32(dst + 17, cm->nb->debitrate_spending, 1);

	spin_unlock_irqrestore( &(cm->nb->credits_lock), iflags );

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 21;
}
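
/*
 * Dispatch on cm->type to the matching add_*() helper; returns the number
 * of bytes appended to the skb, or 0 if the message did not fit.
 */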
static int add_message(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	BUG_ON(split_conndata != 0 && *split_conndata != 0);
	BUG_ON(sc_sendlen != 0 && *sc_sendlen != 0);

	switch (cm->type) {
	case MSGTYPE_ACK:
		return add_ack(skb, cr, cm, spaceleft);
	case MSGTYPE_ACK_CONN:
		return add_ack_conn(skb, cr, cm, spaceleft);
	case MSGTYPE_ACK_CONN_OOO:
		return add_ack_conn_ooo(skb, cr, cm, spaceleft);
	case MSGTYPE_PONG:
		return add_pong(skb, cr, cm, spaceleft);
	case MSGTYPE_CONNECT:
		return add_connect(skb, cr, cm, spaceleft);
	case MSGTYPE_CONNECT_SUCCESS:
		return add_connect_success(skb, cr, cm, spaceleft);
	case MSGTYPE_RESET_CONN:
		return add_reset_conn(skb, cr, cm, spaceleft);
	case MSGTYPE_CONNDATA:
		return add_conndata(skb, cr, cm, spaceleft, split_conndata,
				sc_sendlen);
	case MSGTYPE_PING_CONN:
		return add_ping_conn(skb, cr, cm, spaceleft);
	case MSGTYPE_CONNID_UNKNOWN:
		return add_connid_unknown(skb, cr, cm, spaceleft);
	case MSGTYPE_PING_ALL_CONNS:
		return add_ping_all_conns(skb, cr, cm, spaceleft);
	case MSGTYPE_SET_MAX_CMSG_DELAY:
		return add_set_max_cmsg_dly(skb, cr, cm, spaceleft);
	case MSGTYPE_SET_CREDITS:
		return add_credits(skb, cr, cm, spaceleft);
	default:
		BUG();
	}
	BUG();
	return 0;
}

static __u32 recount_ping_conns(struct neighbor *nb)
{
	__u32 cnt = 0;
	struct list_head *curr = nb->next_ping_conn->target.out.nb_list.next;
	while (curr != &(nb->snd_conn_list)) {
		cnt++;
		BUG_ON(cnt > 1000000000);
		curr = curr->next;
	}
	return cnt;
}

static __u32 __send_messages_pc(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft)
{
	__u32 length = 0;
	mutex_lock(&(nb->conn_list_lock));
	while (nb->next_ping_conn != 0) {
		struct conn *rconn;
		struct conn *sconn;
		struct list_head *next;
		struct control_msg_out *cm;
		int rc;

		rconn = nb->next_ping_conn;
		sconn = rconn->reversedir;

		BUG_ON(rconn->targettype != TARGET_OUT);
		BUG_ON(sconn->sourcetype != SOURCE_IN);

		if (unlikely(rconn->target.out.conn_id))
			goto next;

		if (nb->ping_conns_remaining == 0) {
			atomic_set(&(sconn->source.in.pong_awaiting), 1);
			nb->pong_conns_expected++;
			nb->ping_conns_remaining--;
			if (unlikely(nb->ping_conns_remaining == 0))
				nb->ping_conns_remaining =
						recount_ping_conns(nb);
		} else {
			if (likely(atomic_read(&(
					sconn->source.in.pong_awaiting)) == 0))
				goto next;
			nb->ping_conns_remaining--;
			if (unlikely(nb->ping_conns_retrans_remaining == 0))
				nb->ping_conns_retrans_remaining =
						recount_ping_conns(nb);
		}

		cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);
		if (unlikely(cm == 0)) /* avoid NULL deref if allocation fails */
			break;
		cm->length = 5;
		cm->type = MSGTYPE_PING_CONN;
		cm->msg.ping_conn.conn_id = rconn->target.out.conn_id;
		rc = add_message(skb, cr, cm, spaceleft - length, 0, 0);
		if (rc == 0)
			break;

		length += rc;
next:
		next = rconn->target.out.nb_list.next;
		nb->next_ping_conn = container_of(next, struct conn,
				target.out.nb_list);
		if (next == &(nb->snd_conn_list)) {
			nb->next_ping_conn = 0;
			nb->ping_conns_remaining = 0;
		}
	}
	if (unlikely(length != 0)) {
		nb->ping_conn_completed = jiffies;
	}
	mutex_unlock(&(nb->conn_list_lock));
	return length;
}
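
/*
 * Drain the urgent queue first and then (unless urgentonly) the normal
 * queue, appending messages until one no longer fits; that message is put
 * back at the head of its queue. cmsg_lock is dropped around add_message().
 */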
static __u32 __send_messages(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft, int urgentonly,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	__u32 length = 0;
	while (!list_empty(&(nb->ucontrol_msgs_out)) || (!urgentonly &&
			!list_empty(&(nb->control_msgs_out)))) {
		int rc;

		int urgent = !list_empty(&(nb->ucontrol_msgs_out));

		struct control_msg_out *cm;

		if (urgent)
			cm = container_of(nb->ucontrol_msgs_out.next,
					struct control_msg_out, lh);
		else
			cm = container_of(nb->control_msgs_out.next,
					struct control_msg_out, lh);

		list_del(&(cm->lh));
		if (urgent)
			nb->ucmlength -= cm->length;
		else
			nb->cmlength -= cm->length;
		mutex_unlock(&(nb->cmsg_lock));
		rc = add_message(skb, cr, cm, spaceleft - length,
				split_conndata, sc_sendlen);
		mutex_lock(&(nb->cmsg_lock));

		if (rc == 0) {
			if (urgent) {
				list_add(&(cm->lh), &(nb->ucontrol_msgs_out));
				nb->ucmlength += cm->length;
			} else {
				list_add(&(cm->lh), &(nb->control_msgs_out));
				nb->cmlength += cm->length;
			}
			break;
		}

		length += rc;
	}

	return length;
}

static int msgtype_present(struct neighbor *nb, __u8 type)
{
	struct list_head *curr;

	curr = nb->control_msgs_out.next;
	while (curr != &(nb->control_msgs_out)) {
		struct control_msg_out *cm = container_of(curr,
				struct control_msg_out, lh);

		if (cm->type == type)
			return 1;

		curr = curr->next;
	}

	return 0;
}

static int ping_all_conns_needed(struct neighbor *nb)
{
	if (likely(nb->ping_all_conns == 0))
		return 0;

	if (msgtype_present(nb, MSGTYPE_PING_ALL_CONNS))
		return 0;

	return 1;
}

static int __send_messages_cred(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft)
{
	struct control_msg_out *cm;
	int rc;

	cm = alloc_control_msg(nb, ACM_PRIORITY_MED);

	if (unlikely(cm == 0))
		return 0;

	cm->type = MSGTYPE_SET_CREDITS;
	cm->length = 21;

	rc = add_message(skb, cr, cm, spaceleft, 0, 0);

	nb->send_credits = 0;
	return rc;
}

static int __send_messages_smcd(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft)
{
	struct control_msg_out *cm;
	int rc;

	cm = alloc_control_msg(nb, ACM_PRIORITY_MED);

	if (unlikely(cm == 0))
		return 0;

	cm->type = MSGTYPE_SET_MAX_CMSG_DELAY;
	cm->msg.set_max_cmsg_delay.delay = CMSG_INTERVAL_MS * 10;
	cm->length = 5;

	rc = add_message(skb, cr, cm, spaceleft, 0, 0);

	nb->max_cmsg_delay_sent = 1;
	return rc;
}

static int __send_messages_pac(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft)
{
	struct control_msg_out *cm;
	int rc;

	cm = alloc_control_msg(nb, ACM_PRIORITY_MED);

	if (unlikely(cm == 0))
		return 0;

	cm->type = MSGTYPE_PING_ALL_CONNS;
	cm->length = 1;

	rc = add_message(skb, cr, cm, spaceleft, 0, 0);

	nb->ping_all_conns = 0;
	return rc;
}
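
/*
 * Build and transmit one kernel packet: the optional ping first, then the
 * housekeeping messages (ping_all_conns, max_cmsg_delay, credits), the
 * queued control messages and the per-conn pings, padded up to spaceleft.
 * If dev_queue_xmit() fails everything is requeued; on success conn-data
 * messages are dropped from the retrans entry (a split remainder is resent
 * via send_conndata()) and the entry is scheduled for retransmission if any
 * retransmittable messages remain.
 */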
static int _send_messages(struct neighbor *nb, struct sk_buff *skb, int ping,
		struct control_retrans *cr, int spaceleft, int urgentonly)
{
	int rc;
	int length = 0;
	__u32 pingcookie = 0;
	struct control_msg_out *split_conndata = 0;
	__u32 sc_sendlen = 0;

	mutex_lock(&(nb->cmsg_lock));

	if (ping != 0) {
		int rc;
		pingcookie = add_ping_req(nb);
		rc = add_ping(skb, pingcookie, spaceleft - length);
		BUG_ON(rc == 0);
		length += rc;
	}

	if (likely(urgentonly == 0) && unlikely(ping_all_conns_needed(nb) != 0))
		length += __send_messages_pac(nb, skb, cr, spaceleft - length);

	if (likely(urgentonly == 0) && unlikely(nb->max_cmsg_delay_sent == 0))
		length += __send_messages_smcd(nb, skb, cr, spaceleft - length);

	if (likely(urgentonly == 0) && unlikely(nb->send_credits != 0) &&
			msgtype_present(nb, MSGTYPE_SET_CREDITS) == 0)
		length += __send_messages_cred(nb, skb, cr, spaceleft - length);

	length += __send_messages(nb, skb, cr, spaceleft - length, urgentonly,
			&split_conndata, &sc_sendlen);

	if (likely(urgentonly == 0))
		length += __send_messages_pc(nb, skb, cr, spaceleft - length);

	mutex_unlock(&(nb->cmsg_lock));

	if (unlikely(length > spaceleft))
		printk(KERN_ERR "error cor/kpacket_gen: length > spaceleft!?");

	padding(skb, spaceleft - length);

	rc = dev_queue_xmit(skb);

	if (rc != 0) {
		unadd_ping_req(nb, pingcookie);

		while (list_empty(&(cr->msgs)) == 0) {
			struct control_msg_out *cm = container_of(cr->msgs.prev,
					struct control_msg_out, lh);
			list_del(&(cm->lh));
			add_control_msg(cm, 1);
		}

		if (split_conndata != 0) {
			add_control_msg(split_conndata, 1);
		}

		kref_put(&(cr->ref), free_control_retrans);
	} else {
		struct list_head *curr = cr->msgs.next;

		while(curr != &(cr->msgs)) {
			struct control_msg_out *cm = container_of(curr,
					struct control_msg_out, lh);

			curr = curr->next;

			if (cm->type == MSGTYPE_CONNDATA) {
				list_del(&(cm->lh));
				kfree(cm->msg.conn_data.data_orig);
				free_control_msg(cm);
			}
		}

		if (split_conndata != 0) {
			BUG_ON(sc_sendlen == 0);
			BUG_ON(sc_sendlen >=
					split_conndata->msg.conn_data.datalen);

			split_conndata->msg.conn_data.data += sc_sendlen;
			split_conndata->msg.conn_data.datalen -= sc_sendlen;

			send_conndata(split_conndata,
					split_conndata->msg.conn_data.conn_id,
					split_conndata->msg.conn_data.seqno,
					split_conndata->msg.conn_data.data_orig,
					split_conndata->msg.conn_data.data,
					split_conndata->msg.conn_data.datalen);
		}

		if (list_empty(&(cr->msgs)))
			kref_put(&(cr->ref), free_control_retrans);
		else
			schedule_retransmit(cr, nb);
	}

	return rc;
}
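
/*
 * Estimate how many bytes of control messages are pending for this
 * neighbor, including messages that are only generated while packing
 * (conn pings and their retransmits, ping_all_conns, max_cmsg_delay,
 * credits) and the optional ping header.
 */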
static __u32 get_total_messages_length(struct neighbor *nb, int ping,
		int urgentonly)
{
	__u32 length = nb->ucmlength;
	if (likely(nb->send_credits == 0) && unlikely(debit_adj_needed(nb)))
		nb->send_credits = 1;

	if (likely(urgentonly == 0)) {
		length += nb->cmlength + nb->ping_conns_remaining * 5;
		if (likely(nb->ping_conns_remaining == 0)) {
			if (likely(nb->ping_conns_retrans_remaining == 0) &&
					unlikely(nb->pong_conns_expected !=0) &&
					time_before(nb->ping_conn_completed,
					jiffies + msecs_to_jiffies(
					PING_ALL_CONNS_TIMEOUT) +
					usecs_to_jiffies(((__u32) atomic_read(&(
					nb->latency))) * 2 + ((__u32)
					atomic_read(&(nb->max_remote_cmsg_delay)
					)))))
				nb->ping_conns_retrans_remaining =
						nb->pong_conns_expected;

			if (unlikely(nb->ping_conns_retrans_remaining >
					nb->pong_conns_expected))
				nb->ping_conns_retrans_remaining =
						nb->pong_conns_expected;

			length += nb->ping_conns_retrans_remaining * 5;
		}
		if (unlikely(ping_all_conns_needed(nb) != 0))
			length += 1;
		if (unlikely(nb->max_cmsg_delay_sent == 0))
			length += 5;
		if (unlikely(nb->send_credits == 2) &&
				msgtype_present(nb, MSGTYPE_SET_CREDITS) == 0)
			length += 21;
	}
	if (ping == 2 || (length > 0 && ping != 0))
		length += 5;
	if (likely(urgentonly == 0) && length > 0 &&
			unlikely(nb->send_credits == 1) &&
			msgtype_present(nb, MSGTYPE_SET_CREDITS) == 0)
		length += 21;

	return length;
}
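
/*
 * Pack and send kernel packets until the pending data no longer fills a
 * packet of targetmss bytes (or, with allmsgs set, until everything has
 * been sent). If transmission fails the neighbor is requeued via
 * qos_enqueue_kpacket() and the work is resumed later through
 * resume_send_messages().
 */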
static int send_messages(struct neighbor *nb, int allmsgs, int resume)
{
	int rc = 0;
	int ping;
	int targetmss = mss(nb);

	int nbstate = get_neigh_state(nb);
	int urgentonly = (nbstate != NEIGHBOR_STATE_ACTIVE);

	check_credit_state(nb);

	mutex_lock(&(nb->cmsg_lock));

	if (resume)
		allmsgs = nb->kp_allmsgs;

	ping = time_to_send_ping(nb);

	while (1) {
		__u32 length;

		__u32 seqno;
		struct sk_buff *skb;
		struct control_retrans *cr;

		BUG_ON(list_empty(&(nb->control_msgs_out)) &&
				(nb->cmlength != 0));
		BUG_ON((list_empty(&(nb->control_msgs_out)) == 0) &&
				(nb->cmlength == 0));
		BUG_ON(list_empty(&(nb->ucontrol_msgs_out)) &&
				(nb->ucmlength != 0));
		BUG_ON((list_empty(&(nb->ucontrol_msgs_out)) == 0) &&
				(nb->ucmlength == 0));
		BUG_ON(nb->cmlength < 0);
		BUG_ON(nb->ucmlength < 0);

		length = get_total_messages_length(nb, ping, urgentonly);

		if (length == 0)
			break;

		if (length < targetmss && allmsgs == 0)
			break;

		seqno = atomic_add_return(1, &(nb->kpacket_seqno));

		if (length > targetmss)
			length = targetmss;

		mutex_unlock(&(nb->cmsg_lock));
		skb = create_packet(nb, length, GFP_KERNEL, 0, seqno);
		if (unlikely(skb == 0)) {
			printk(KERN_ERR "cor: send_messages: cannot allocate "
					"skb (out of memory?)");
			goto oom;
		}

		cr = kmem_cache_alloc(controlretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree_skb(skb);
			printk(KERN_ERR "cor: send_messages: cannot allocate "
					"control_retrans (out of memory?)");
			goto oom;
		}
		memset(cr, 0, sizeof(struct control_retrans));
		kref_init(&(cr->ref));
		cr->nb = nb;
		cr->seqno = seqno;
		INIT_LIST_HEAD(&(cr->msgs));

		rc = _send_messages(nb, skb, ping, cr, length, urgentonly);
		ping = 0;

		mutex_lock(&(nb->cmsg_lock));

		if (rc != 0)
			break;
	}

	if (0) {
oom:
		mutex_lock(&(nb->cmsg_lock));
	}

	if (rc != 0) {
		if (resume == 0) {
			nb->kp_allmsgs = nb->kp_allmsgs || allmsgs;
			qos_enqueue_kpacket(nb);
		}
	} else if (allmsgs) {
		nb->kp_allmsgs = 0;
	}

	mutex_unlock(&(nb->cmsg_lock));

	if (allmsgs)
		schedule_controlmsg_timerfunc(nb);

	return rc;
}

int resume_send_messages(struct neighbor *nb)
{
	return send_messages(nb, 0, 1);
}

static void controlmsg_timerfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, cmsg_timer);
	__u64 jiffies = get_jiffies_64();

	mutex_lock(&(nb->cmsg_lock));

	if (nb->timeout > jiffies) {
		INIT_DELAYED_WORK(&(nb->cmsg_timer), controlmsg_timerfunc);
		schedule_delayed_work(&(nb->cmsg_timer), nb->timeout - jiffies);
		mutex_unlock(&(nb->cmsg_lock));
		return;
	}

	mutex_unlock(&(nb->cmsg_lock));

	send_messages(nb, 1, 0);
	kref_put(&(nb->ref), neighbor_free);
}

void schedule_controlmsg_timerfunc(struct neighbor *nb)
{
	__u64 jiffies = get_jiffies_64();
	long long delay;

	int state = get_neigh_state(nb);

	if (unlikely(state == NEIGHBOR_STATE_KILLED))
		return;

	mutex_lock(&(nb->cmsg_lock));
	nb->timeout += msecs_to_jiffies(CMSG_INTERVAL_MS);

	delay = nb->timeout - jiffies;
	if (delay < 0) {
		delay = 1;
		nb->timeout = jiffies;
	}

	INIT_DELAYED_WORK(&(nb->cmsg_timer), controlmsg_timerfunc);
	schedule_delayed_work(&(nb->cmsg_timer), delay);
	mutex_unlock(&(nb->cmsg_lock));
	kref_get(&(nb->ref));
}

static void free_oldest_ucm(struct neighbor *nb)
{
	struct control_msg_out *cm = container_of(nb->ucontrol_msgs_out.next,
			struct control_msg_out, lh);

	BUG_ON(list_empty(&(nb->ucontrol_msgs_out)));
	BUG_ON(isurgent(cm) == 0);

	list_del(&(cm->lh));
	nb->ucmlength -= cm->length;
	atomic_dec(&(nb->ucmcnt));
	free_control_msg(cm);
}
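
/*
 * Enqueue a message on its neighbor. Urgent messages (pongs/acks) go to the
 * bounded ucontrol queue: on retransmit overflow the new message is
 * dropped, otherwise the oldest urgent message is dropped instead. Once the
 * queued bytes reach the mss, send_messages() is kicked immediately.
 */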
static void add_control_msg(struct control_msg_out *cm, int retrans)
{
	int nbstate;

	BUG_ON(cm->nb == 0);

	nbstate = get_neigh_state(cm->nb);

	BUG_ON(cm == 0);
	BUG_ON(cm->lh.next != LIST_POISON1 || cm->lh.prev != LIST_POISON2);

	mutex_lock(&(cm->nb->cmsg_lock));

	if (isurgent(cm)) {
		long msgs;

		msgs = atomic_inc_return(&(cm->nb->ucmcnt));
		BUG_ON(msgs <= 0);

		if (unlikely(retrans)) {
			if (msgs > MAX_URGENT_CMSGS_PER_NEIGH_RETRANSALLOW ||
					msgs > MAX_URGENT_CMSGS_PER_NEIGH) {
				atomic_dec(&(cm->nb->ucmcnt));
				free_control_msg(cm);
				goto out;
			}

			cm->nb->ucmlength += cm->length;
			list_add(&(cm->lh), &(cm->nb->ucontrol_msgs_out));
		} else {
			if (msgs > MAX_URGENT_CMSGS_PER_NEIGH) {
				free_oldest_ucm(cm->nb);
			}

			cm->nb->ucmlength += cm->length;
			list_add_tail(&(cm->lh), &(cm->nb->ucontrol_msgs_out));
		}
	} else {
		cm->nb->cmlength += cm->length;
		list_add_tail(&(cm->lh), &(cm->nb->control_msgs_out));
	}

	if (unlikely((nbstate == NEIGHBOR_STATE_ACTIVE ? cm->nb->cmlength : 0)+
			cm->nb->ucmlength >= mss(cm->nb)))
		send_messages(cm->nb, 0, 0);

out:
	mutex_unlock(&(cm->nb->cmsg_lock));
}

void send_pong(struct neighbor *nb, __u32 cookie)
{
	struct control_msg_out *cm = _alloc_control_msg(nb, 0, 1);

	if (unlikely(cm == 0))
		return;

	cm->nb = nb;
	cm->type = MSGTYPE_PONG;
	cm->msg.pong.cookie = cookie;
	cm->msg.pong.time_enqueued = jiffies;
	cm->length = 9;
	add_control_msg(cm, 0);
}

void send_reset_conn(struct control_msg_out *cm, __u32 conn_id)
{
	cm->type = MSGTYPE_RESET_CONN;
	cm->msg.reset.conn_id = conn_id;
	cm->length = 5;
	add_control_msg(cm, 0);
}

void send_ack(struct neighbor *nb, __u32 seqno)
{
	struct control_msg_out *cm = _alloc_control_msg(nb, 0, 1);

	if (unlikely(cm == 0))
		return;

	cm->nb = nb;
	cm->type = MSGTYPE_ACK;
	cm->msg.ack.seqno = seqno;
	cm->length = 5;
	add_control_msg(cm, 0);
}

void send_ack_conn(struct control_msg_out *cm, struct conn *rconn,
		__u32 conn_id, __u32 seqno)
{
	cm->type = MSGTYPE_ACK_CONN;
	kref_get(&(rconn->ref));
	BUG_ON(rconn->sourcetype != SOURCE_IN);
	cm->msg.ack_conn.rconn = rconn;
	cm->msg.ack_conn.conn_id = conn_id;
	cm->msg.ack_conn.seqno = seqno;

	cm->length = 10;
	add_control_msg(cm, 0);
}

void send_ack_conn_ooo(struct control_msg_out *cm, struct conn *rconn,
		__u32 conn_id, __u32 seqno, __u32 seqno_ooo, __u32 length)
{
	cm->type = MSGTYPE_ACK_CONN_OOO;
	kref_get(&(rconn->ref));
	BUG_ON(rconn->sourcetype != SOURCE_IN);
	cm->msg.ack_conn_ooo.rconn = rconn;
	cm->msg.ack_conn_ooo.conn_id = conn_id;
	cm->msg.ack_conn_ooo.seqno = seqno;
	cm->msg.ack_conn_ooo.seqno_ooo = seqno_ooo;
	cm->msg.ack_conn_ooo.length = length;
	cm->length = 18;
	add_control_msg(cm, 0);
}

void send_connect_success(struct control_msg_out *cm, __u32 rcvd_conn_id,
		__u32 gen_conn_id, __u32 init_seqno, struct conn *rconn)
{
	cm->type = MSGTYPE_CONNECT_SUCCESS;
	cm->msg.connect_success.rcvd_conn_id = rcvd_conn_id;
	cm->msg.connect_success.gen_conn_id = gen_conn_id;
	cm->msg.connect_success.init_seqno = init_seqno;
	kref_get(&(rconn->ref));
	BUG_ON(rconn->sourcetype != SOURCE_IN);
	cm->msg.connect_success.rconn = rconn;
	cm->length = 14;
	add_control_msg(cm, 0);
}

void send_connect_nb(struct control_msg_out *cm, __u32 conn_id,
		__u32 init_seqno, struct conn *sconn)
{
	cm->type = MSGTYPE_CONNECT;
	cm->msg.connect.conn_id = conn_id;
	cm->msg.connect.init_seqno = init_seqno;
	kref_get(&(sconn->ref));
	BUG_ON(sconn->sourcetype != SOURCE_IN);
	cm->msg.connect.sconn = sconn;
	cm->length = 10;
	add_control_msg(cm, 0);
}

void send_conndata(struct control_msg_out *cm, __u32 conn_id, __u32 seqno,
		char *data_orig, char *data, __u32 datalen)
{
	cm->type = MSGTYPE_CONNDATA;
	cm->msg.conn_data.conn_id = conn_id;
	cm->msg.conn_data.seqno = seqno;
	cm->msg.conn_data.data_orig = data_orig;
	cm->msg.conn_data.data = data;
	cm->msg.conn_data.datalen = datalen;
	cm->length = 11 + datalen;
	add_control_msg(cm, 0);
}

void send_ping_conn(struct control_msg_out *cm, __u32 conn_id)
{
	cm->type = MSGTYPE_PING_CONN;
	cm->msg.ping_conn.conn_id = conn_id;
	cm->length = 5;
	add_control_msg(cm, 0);
}

void send_connid_unknown(struct control_msg_out *cm, __u32 conn_id)
{
	cm->type = MSGTYPE_CONNID_UNKNOWN;
	cm->msg.connid_unknown.conn_id = conn_id;
	cm->length = 5;
	add_control_msg(cm, 0);
}

void send_ping_all_conns(struct neighbor *nb)
{
	mutex_lock(&(nb->cmsg_lock));
	nb->ping_all_conns = 1;
	mutex_unlock(&(nb->cmsg_lock));
}

void send_credits(struct neighbor *nb)
{
	mutex_lock(&(nb->cmsg_lock));
	nb->send_credits = 2;
	mutex_unlock(&(nb->cmsg_lock));
}

static int matches_connretrans(void *htentry, void *searcheditem)
{
	struct control_retrans *cr = (struct control_retrans *) htentry;
	struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
			searcheditem;

	return rm->nb == cr->nb && rm->seqno == cr->seqno;
}

void __init cor_kgen_init(void)
{
	controlmsg_slab = kmem_cache_create("cor_controlmsg",
			sizeof(struct control_msg_out), 8, 0, 0);
	controlretrans_slab = kmem_cache_create("cor_controlretransmsg",
			sizeof(struct control_retrans), 8, 0, 0);
	htable_init(&retransmits, matches_connretrans,
			offsetof(struct control_retrans, htab_entry),
			offsetof(struct control_retrans, ref));
}