/*
 * conn reference renaming and locking logic
 * [cor_2_6_31.git] / net / cor / kpacket_gen.c
 * blob c483e4f71cf098d10478d815e3df2e348aa742e6
 */
1 /**
2 * Connection oriented routing
3 * Copyright (C) 2007-2011 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <asm/byteorder.h>
23 #include "cor.h"
/* not sent over the network - internal meaning only */
/* These tag struct control_msg_out.type; the on-wire opcodes are the KP_*
 * constants used by the add_* encoders below. */
#define MSGTYPE_PONG 1
#define MSGTYPE_ACK 2
#define MSGTYPE_ACK_CONN 3
#define MSGTYPE_CONNECT 4
#define MSGTYPE_CONNECT_SUCCESS 5
#define MSGTYPE_RESET_CONN 6
#define MSGTYPE_CONNDATA 7
#define MSGTYPE_CONNID_UNKNOWN 8
#define MSGTYPE_PING_ALL_CONNS 9
#define MSGTYPE_SET_MAX_CMSG_DELAY 10
/* One outgoing control message, queued per neighbor until it is packed into
 * a kernel packet by send_messages()/add_message(). */
struct control_msg_out{
	struct list_head lh; /* either neighbor or control_retrans_packet */
	struct neighbor *nb; /* destination neighbor */

	__u32 length; /* encoded size in bytes; must match the add_* writer */

	__u8 type; /* MSGTYPE_* (internal only) */
	union{
		struct{
			__u32 cookie; /* echoed back from the received ping */
			unsigned long time_enqueued; /* jiffies */
			int sent;
		}pong;

		struct{
			__u32 seqno; /* kernel-packet seqno being acked */
		}ack;

		struct{
			struct conn *rconn; /* ref held; dropped in free_control_msg() */
			__u32 conn_id;
			__u32 seqno;
			__u32 seqno_ooo; /* start of out-of-order range */
			__u32 length; /* length of the out-of-order range */

			__u8 decaytime_seqno;
			__u16 decaytime;

			__u8 flags; /* KP_ACK_CONN_FLAGS_* selecting optional fields */
		}ack_conn;

		struct{
			__u32 conn_id;
			__u32 init_seqno;
			struct conn *sconn; /* ref held; dropped in free_control_msg() */
		}connect;

		struct{
			__u32 rcvd_conn_id;
			__u32 gen_conn_id;
			__u32 init_seqno;
			struct conn *rconn; /* ref held; dropped in free_control_msg() */
		}connect_success;

		struct{
			__u32 conn_id;
		}reset;

		struct{
			__u32 conn_id;
			__u32 seqno;
			char *data_orig; /* original allocation, kfree'd after send */
			char *data; /* current pos; advanced when the msg is split */
			__u32 datalen; /* bytes left at *data */
		}conn_data;

		struct{
			__u32 conn_id;
		}connid_unknown;

		struct{
			__u32 delay; /* max cmsg delay to announce, in units of 1/10 ms (see __send_messages_smcd) */
		}set_max_cmsg_delay;
	}msg;
};
/* A sent kernel packet awaiting its ack; keeps the control messages it
 * carried so they can be re-queued when the retransmit timer fires. */
struct control_retrans {
	struct kref ref;

	struct neighbor *nb;
	__u32 seqno; /* seqno of the kernel packet */

	unsigned long timeout; /* jiffies; computed by set_retrans_timeout() */

	struct list_head msgs; /* of control_msg_out.lh */

	struct htab_entry htab_entry; /* in the global "retransmits" table */
	struct list_head timeout_list; /* nb->retrans_list, in timeout order */
};

struct kmem_cache *controlmsg_slab;
struct kmem_cache *controlretrans_slab;

/* lookup of pending retransmits by (neighbor, seqno), see rm_to_key() */
static struct htable retransmits;

/* global count of queued non-urgent control messages across all neighbors */
atomic_t cmcnt = ATOMIC_INIT(0);

static void add_control_msg(struct control_msg_out *msg, int retrans);
126 static inline int isurgent(struct control_msg_out *cm)
128 if (unlikely(cm->type == MSGTYPE_PONG || cm->type == MSGTYPE_ACK))
129 return 1;
130 return 0;
133 static struct control_msg_out *__alloc_control_msg(void)
135 struct control_msg_out *cm = kmem_cache_alloc(controlmsg_slab,
136 GFP_KERNEL);
137 if (unlikely(cm == 0))
138 return 0;
139 memset(cm, 0, sizeof(struct control_msg_out));
140 cm->lh.next = LIST_POISON1;
141 cm->lh.prev = LIST_POISON2;
142 return cm;
145 static int calc_limit(int limit, int priority)
147 if (priority == ACM_PRIORITY_LOW)
148 return (limit+1)/2;
149 else if (priority == ACM_PRIORITY_MED)
150 return (limit*3 + 3)/4;
151 else if (priority == ACM_PRIORITY_HIGH)
152 return limit;
153 else
154 BUG();
/* Nonblocking admission check: may a control message of the given
 * ACM_PRIORITY_* be allocated for this neighbor right now?
 * Each neighbor always gets a priority-scaled guaranteed share; above that,
 * a shared global budget applies and a neighbor's allowance shrinks as the
 * global count grows.  This reads counter snapshots only - the authoritative
 * check is repeated in _alloc_control_msg() after incrementing. */
int may_alloc_control_msg(struct neighbor *nb, int priority)
{
	long packets1 = atomic_read(&(nb->cmcnt)); /* this neighbor */
	long packets2 = atomic_read(&(cmcnt)); /* all neighbors */

	BUG_ON(packets1 < 0);
	BUG_ON(packets2 < 0);

	/* within the per-neighbor guaranteed share - always allowed */
	if (packets1 < calc_limit(GUARANTEED_CMSGS_PER_NEIGH, priority))
		return 1;

	/* over the hard per-neighbor cap, or over the share left after
	 * scaling by global occupancy */
	if (unlikely(unlikely(packets2 >= calc_limit(MAX_CMSGS_PER_NEIGH,
			priority)) || unlikely(packets1 >= (
			calc_limit(MAX_CMSGS_PER_NEIGH, priority) *
			(MAX_CMSGS - packets2) / MAX_CMSGS))))
		return 0;
	return 1;
}
176 static struct control_msg_out *_alloc_control_msg(struct neighbor *nb,
177 int priority, int urgent)
179 struct control_msg_out *cm = 0;
181 BUG_ON(nb == 0);
183 if (urgent == 0) {
184 long packets1 = atomic_inc_return(&(nb->cmcnt));
185 long packets2 = atomic_inc_return(&(cmcnt));
187 BUG_ON(packets1 <= 0);
188 BUG_ON(packets2 <= 0);
190 if (packets1 <= calc_limit(GUARANTEED_CMSGS_PER_NEIGH,
191 priority))
192 goto alloc;
194 if (unlikely(unlikely(packets2 > calc_limit(MAX_CMSGS_PER_NEIGH,
195 priority)) || unlikely(packets1 > (
196 calc_limit(MAX_CMSGS_PER_NEIGH, priority) *
197 (MAX_CMSGS - packets2) / MAX_CMSGS))))
198 goto full;
201 alloc:
202 cm = __alloc_control_msg();
203 if (unlikely(cm == 0))
204 goto full;
205 cm->nb = nb;
207 if (0) {
208 full:
209 if (urgent == 0) {
210 atomic_dec(&(nb->cmcnt));
211 atomic_dec(&(cmcnt));
214 return cm;
/* Public non-urgent allocator; subject to the cmsg budgets.
 * Returns 0 when over budget or out of memory. */
struct control_msg_out *alloc_control_msg(struct neighbor *nb, int priority)
{
	return _alloc_control_msg(nb, priority, 0);
}
222 void free_control_msg(struct control_msg_out *cm)
224 if (isurgent(cm) == 0) {
225 atomic_dec(&(cm->nb->cmcnt));
226 atomic_dec(&(cmcnt));
229 if (cm->type == MSGTYPE_ACK_CONN) {
230 struct conn *sconn = cm->msg.ack_conn.rconn->reversedir;
231 BUG_ON(cm->msg.ack_conn.rconn == 0);
232 BUG_ON(sconn->targettype != TARGET_OUT);
233 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_CREDITS) != 0 &&
234 sconn->target.out.decaytime_send_allowed != 0) {
235 sconn->target.out.decaytime_send_allowed = 0;
236 refresh_conn_credits(sconn, 0, 0);
239 kref_put(&(cm->msg.ack_conn.rconn->ref), free_conn);
240 cm->msg.ack_conn.rconn = 0;
241 } else if (cm->type == MSGTYPE_CONNECT) {
242 BUG_ON(cm->msg.connect.sconn == 0);
243 kref_put(&(cm->msg.connect.sconn->ref), free_conn);
244 cm->msg.connect.sconn = 0;
245 } else if (cm->type == MSGTYPE_CONNECT_SUCCESS) {
246 BUG_ON(cm->msg.connect_success.rconn == 0);
247 kref_put(&(cm->msg.connect_success.rconn->ref), free_conn);
248 cm->msg.connect_success.rconn = 0;
251 kmem_cache_free(controlmsg_slab, cm);
254 static void free_control_retrans(struct kref *ref)
256 struct control_retrans *cr = container_of(ref, struct control_retrans,
257 ref);
259 while (list_empty(&(cr->msgs)) == 0) {
260 struct control_msg_out *cm = container_of(cr->msgs.next,
261 struct control_msg_out, lh);
262 list_del(&(cm->lh));
263 free_control_msg(cm);
266 kmem_cache_free(controlretrans_slab, cr);
/* Key for looking up a control_retrans in the "retransmits" htable:
 * a pending retransmit is identified by (neighbor, kernel packet seqno). */
struct retransmit_matchparam {
	struct neighbor *nb;
	__u32 seqno;
};
274 static __u32 rm_to_key(struct retransmit_matchparam *rm)
276 return ((__u32)((long) rm->nb)) ^ rm->seqno;
/* Arm cr->timeout: a fixed 100ms floor plus twice the measured neighbor
 * latency plus the delay the remote announced for its own cmsg batching. */
static void set_retrans_timeout(struct control_retrans *cr, struct neighbor *nb)
{
	cr->timeout = jiffies + usecs_to_jiffies(100000 +
			((__u32) atomic_read(&(nb->latency))) * 2 +
			((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
}
/* Delayed-work handler for nb->retrans_timer.  Walks nb->retrans_list in
 * timeout order: expired entries are removed from the retransmits table and
 * their messages re-queued; the first unexpired entry re-arms the timer.
 * If the neighbor was killed, entries are discarded instead.
 * Locking: both "break" paths leave retrans_lock held; it is released after
 * the loop.  The KILLED path and the re-queue path drop the lock inside the
 * loop and re-take it at the top. */
void retransmit_timerfunc(struct work_struct *work)
{
	unsigned long iflags;

	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer);

	int nbstate;
	int nbput = 0; /* drop the timer's nb reference when it stops */

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	while (1) {
		struct control_retrans *cr = 0;
		struct retransmit_matchparam rm;

		spin_lock_irqsave(&(nb->retrans_lock), iflags);

		if (list_empty(&(nb->retrans_list))) {
			nb->retrans_timer_running = 0;
			nbput = 1;
			break; /* retrans_lock still held; released below */
		}

		cr = container_of(nb->retrans_list.next,
				struct control_retrans, timeout_list);

		BUG_ON(cr->nb != nb);

		rm.seqno = cr->seqno;
		rm.nb = nb;

		list_del(&(cr->timeout_list));

		if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

			/* discard: one put for the htable, one for the list */
			htable_delete(&retransmits, rm_to_key(&rm), &rm,
					free_control_retrans);
			kref_put(&(cr->ref), free_control_retrans);
			continue;
		}

		if (time_after(cr->timeout, jiffies)) {
			/* not yet due: put it back and re-arm the timer */
			list_add(&(cr->timeout_list), &(nb->retrans_list));
			schedule_delayed_work(&(nb->retrans_timer),
					cr->timeout - jiffies);
			break; /* retrans_lock still held; released below */
		}

		if (unlikely(htable_delete(&retransmits, rm_to_key(&rm), &rm,
				free_control_retrans)))
			BUG();

		spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

		/* re-queue every message this packet carried */
		while (list_empty(&(cr->msgs)) == 0) {
			struct control_msg_out *cm = container_of(cr->msgs.next,
					struct control_msg_out, lh);

			list_del(&(cm->lh));
			add_control_msg(cm, 1);
		}

		kref_put(&(cr->ref), free_control_retrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	if (nbput)
		kref_put(&(nb->ref), neighbor_free);
}
/* Register a just-sent packet for retransmission: insert it into the
 * retransmits htable and append it to the neighbor's timeout list.  If the
 * list was empty and no timer is running, start the retransmit timer and
 * take a neighbor reference on its behalf (dropped in
 * retransmit_timerfunc when the timer stops). */
static void schedule_retransmit(struct control_retrans *cr, struct neighbor *nb)
{
	unsigned long iflags;

	struct retransmit_matchparam rm;
	int first;

	rm.seqno = cr->seqno;
	rm.nb = nb;

	set_retrans_timeout(cr, nb);

	spin_lock_irqsave(&(nb->retrans_lock), iflags);
	htable_insert(&retransmits, (char *) cr, rm_to_key(&rm));
	first = list_empty(&(nb->retrans_list));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list));

	if (first && nb->retrans_timer_running == 0) {
		schedule_delayed_work(&(nb->retrans_timer),
				cr->timeout - jiffies);
		nb->retrans_timer_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}
/* An ack for kernel packet "seqno" arrived: look up and remove the pending
 * retransmit and drop both of its references (the htable_get reference and
 * the timeout-list reference), which frees it unless a concurrent user
 * still holds one.  Unknown seqnos are logged as bogus/duplicate. */
void kern_ack_rcvd(struct neighbor *nb, __u32 seqno)
{
	unsigned long iflags;

	struct control_retrans *cr = 0;
	struct retransmit_matchparam rm;

	rm.seqno = seqno;
	rm.nb = nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	cr = (struct control_retrans *) htable_get(&retransmits, rm_to_key(&rm),
			&rm);

	if (cr == 0) {
		printk(KERN_ERR "bogus/duplicate ack received");
		goto out;
	}

	if (unlikely(htable_delete(&retransmits, rm_to_key(&rm), &rm,
			free_control_retrans)))
		BUG();

	BUG_ON(cr->nb != nb);

	list_del(&(cr->timeout_list));

out:
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	if (cr != 0) {
		kref_put(&(cr->ref), free_control_retrans); /* htable_get */
		kref_put(&(cr->ref), free_control_retrans); /* list */
	}
}
424 static void padding(struct sk_buff *skb, int length)
426 char *dst;
427 if (length <= 0)
428 return;
429 dst = skb_put(skb, length);
430 BUG_ON(dst == 0);
431 memset(dst, KP_PADDING, length);
434 static int add_ack(struct sk_buff *skb, struct control_retrans *cr,
435 struct control_msg_out *cm, int spaceleft)
437 char *dst;
439 if (unlikely(spaceleft < 5))
440 return 0;
442 dst = skb_put(skb, 5);
443 BUG_ON(dst == 0);
445 dst[0] = KP_ACK;
446 put_u32(dst + 1, cm->msg.ack.seqno, 1);
448 atomic_dec(&(cm->nb->ucmcnt));
449 free_control_msg(cm);
451 return 5;
/* Encode a variable-length KP_ACK_CONN.  Which optional fields follow the
 * fixed header (opcode, conn_id, flags) is driven by cm->msg.ack_conn.flags;
 * cm->length was precomputed by the sender via ack_conn_len() and must match
 * the bytes written here exactly.  The message is parked on cr->msgs for
 * possible retransmission.  Returns bytes written, or 0 if out of space. */
static int add_ack_conn(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;
	int offset = 0;

	if (unlikely(spaceleft < cm->length))
		return 0;

	dst = skb_put(skb, cm->length);
	BUG_ON(dst == 0);

	dst[offset] = KP_ACK_CONN;
	offset++;
	put_u32(dst + offset, cm->msg.ack_conn.conn_id, 1);
	offset += 4;
	dst[offset] = cm->msg.ack_conn.flags;
	offset++;

	if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
		put_u32(dst + offset, cm->msg.ack_conn.seqno, 1);
		offset += 4;

		/* window only ever follows a seqno */
		if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
			BUG_ON(cm->msg.ack_conn.rconn == 0);
			dst[offset] = get_window(cm->msg.ack_conn.rconn,
					cm->nb);
			offset++;
		}
	}

	/* out-of-order range: seqno_ooo plus a 1/2/4-byte length whose size
	 * is encoded in the flags (see ooolen()) */
	if (ooolen(cm->msg.ack_conn.flags) != 0) {
		put_u32(dst + offset, cm->msg.ack_conn.seqno_ooo, 1);
		offset += 4;
		if (ooolen(cm->msg.ack_conn.flags) == 1) {
			BUG_ON(cm->msg.ack_conn.length > 255);
			dst[offset] = cm->msg.ack_conn.length;
			offset += 1;
		} else if (ooolen(cm->msg.ack_conn.flags) == 2) {
			BUG_ON(cm->msg.ack_conn.length <= 255);
			BUG_ON(cm->msg.ack_conn.length > 65535);
			put_u16(dst + offset, cm->msg.ack_conn.length, 1);
			offset += 2;
		} else if (ooolen(cm->msg.ack_conn.flags) == 4) {
			BUG_ON(cm->msg.ack_conn.length <= 65535);
			put_u32(dst + offset, cm->msg.ack_conn.length, 1);
			offset += 4;
		} else {
			BUG();
		}
	}

	/* credits: decaytime (10 bits) + decaytime_seqno (6 bits) */
	if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_CREDITS) != 0) {
		__u16 value = cm->msg.ack_conn.decaytime + (
				cm->msg.ack_conn.decaytime_seqno << 10);

		BUG_ON(cm->msg.ack_conn.decaytime >= 1024);
		BUG_ON(cm->msg.ack_conn.decaytime_seqno >= 64);

		put_u16(dst + offset, value, 1);
		offset += 2;
	}

	list_add_tail(&(cm->lh), &(cr->msgs));

	BUG_ON(offset != cm->length);
	return offset;
}
523 static int add_ping(struct sk_buff *skb, __u32 cookie,
524 int spaceleft)
526 char *dst;
528 if (unlikely(spaceleft < 5))
529 return 0;
531 dst = skb_put(skb, 5);
532 BUG_ON(dst == 0);
534 dst[0] = KP_PING;
535 put_u32(dst + 1, cookie, 0);
537 return 5;
/* Encode a 9-byte KP_PONG: the ping cookie plus the local queueing delay
 * in microseconds.  The message is kept on cr->msgs (retransmittable), but
 * the urgent count is dropped now that it left the urgent queue.
 * Returns bytes written, or 0 if out of space. */
static int add_pong(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 9))
		return 0;

	dst = skb_put(skb, 9);
	BUG_ON(dst == 0);

	dst[0] = KP_PONG;
	put_u32(dst + 1, cm->msg.pong.cookie, 0);
	/* time since enqueue, converted jiffies -> ms -> us */
	put_u32(dst + 5, 1000 * jiffies_to_msecs(jiffies -
			cm->msg.pong.time_enqueued), 1);

	atomic_dec(&(cm->nb->ucmcnt));
	list_add_tail(&(cm->lh), &(cr->msgs));

	return 9;
}
/* Read the current credit/decaytime announcement for sconn's reverse
 * direction under its rcv_lock: decaytime_last in the low 10 bits,
 * decaytime_seqno in the next 6 bits (same packing as KP_ACK_CONN
 * credits, see add_ack_conn()). */
static __u16 get_credits(struct conn *sconn)
{
	__u16 ret;
	mutex_lock(&(sconn->reversedir->rcv_lock));
	BUG_ON(sconn->reversedir->targettype != TARGET_OUT);

	BUG_ON(sconn->reversedir->target.out.decaytime_last >= 1024);
	BUG_ON(sconn->reversedir->target.out.decaytime_seqno >= 64);
	ret = sconn->reversedir->target.out.decaytime_last + (
			sconn->reversedir->target.out.decaytime_seqno <<
			10);
	mutex_unlock(&(sconn->reversedir->rcv_lock));

	return ret;
}
/* Encode a 12-byte KP_CONNECT: conn_id, initial seqno, receive window and
 * credits of the local reverse direction.  Kept on cr->msgs for
 * retransmission.  Returns bytes written, or 0 if out of space. */
static int add_connect(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 12))
		return 0;

	dst = skb_put(skb, 12);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNECT;
	put_u32(dst + 1, cm->msg.connect.conn_id, 1);
	put_u32(dst + 5, cm->msg.connect.init_seqno, 1);
	BUG_ON(cm->msg.connect.sconn == 0);
	dst[9] = get_window(cm->msg.connect.sconn, cm->nb);
	put_u16(dst + 10, get_credits(cm->msg.connect.sconn), 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 12;
}
/* Encode a 16-byte KP_CONNECT_SUCCESS: the conn_id we received, the one we
 * generated, our initial seqno, plus window and credits.  Kept on cr->msgs
 * for retransmission.  Returns bytes written, or 0 if out of space. */
static int add_connect_success(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 16))
		return 0;

	dst = skb_put(skb, 16);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNECT_SUCCESS;
	put_u32(dst + 1, cm->msg.connect_success.rcvd_conn_id, 1);
	put_u32(dst + 5, cm->msg.connect_success.gen_conn_id, 1);
	put_u32(dst + 9, cm->msg.connect_success.init_seqno, 1);
	BUG_ON(cm->msg.connect_success.rconn == 0);
	dst[13] = get_window(cm->msg.connect_success.rconn, cm->nb);
	put_u16(dst + 14, get_credits(cm->msg.connect_success.rconn), 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 16;
}
626 static int add_reset_conn(struct sk_buff *skb, struct control_retrans *cr,
627 struct control_msg_out *cm, int spaceleft)
629 char *dst;
631 if (unlikely(spaceleft < 5))
632 return 0;
634 dst = skb_put(skb, 5);
635 BUG_ON(dst == 0);
637 dst[0] = KP_RESET_CONN;
638 put_u32(dst + 1, cm->msg.reset.conn_id, 1);
640 list_add_tail(&(cm->lh), &(cr->msgs));
642 return 5;
/* Encode a KP_CONN_DATA: an 11-byte header (opcode, conn_id, seqno, len)
 * followed by as much payload as fits.  If only part of the payload fits,
 * the message is NOT parked on cr->msgs; instead it is handed back via
 * *split_conndata with *sc_sendlen = bytes actually sent, and the caller
 * re-queues the remainder (see _send_messages()).  Refuses to emit a
 * fragment when fewer than 25 bytes are available and the whole message
 * would not fit, to avoid tiny splits.  Returns bytes written, or 0. */
static int add_conndata(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	char *dst;

	int totallen = cm->msg.conn_data.datalen + 11;
	int putlen = min(totallen, spaceleft);
	int dataputlen = putlen - 11;

	BUG_ON(split_conndata == 0);
	BUG_ON(sc_sendlen == 0);

	if (dataputlen < 1 || (spaceleft < 25 && spaceleft < totallen))
		return 0;

	dst = skb_put(skb, putlen);
	BUG_ON(dst == 0);

	dst[0] = KP_CONN_DATA;
	put_u32(dst + 1, cm->msg.conn_data.conn_id, 1);
	put_u32(dst + 5, cm->msg.conn_data.seqno, 1);
	put_u16(dst + 9, dataputlen, 1);

	memcpy(dst + 11, cm->msg.conn_data.data, dataputlen);

	if (cm->msg.conn_data.datalen == dataputlen) {
		/* fully sent: park for retransmission */
		list_add_tail(&(cm->lh), &(cr->msgs));
	} else {
		/* partially sent: hand the rest back to the caller */
		*split_conndata = cm;
		*sc_sendlen = dataputlen;
	}

	return putlen;
}
681 static int add_connid_unknown(struct sk_buff *skb, struct control_retrans *cr,
682 struct control_msg_out *cm, int spaceleft)
684 char *dst;
686 if (unlikely(spaceleft < 5))
687 return 0;
689 dst = skb_put(skb, 5);
690 BUG_ON(dst == 0);
692 dst[0] = KP_CONNID_UNKNOWN;
693 put_u32(dst + 1, cm->msg.connid_unknown.conn_id, 1);
695 list_add_tail(&(cm->lh), &(cr->msgs));
697 return 5;
700 static int add_ping_all_conns(struct sk_buff *skb, struct control_retrans *cr,
701 struct control_msg_out *cm, int spaceleft)
703 char *dst;
705 if (unlikely(spaceleft < 1))
706 return 0;
708 dst = skb_put(skb, 1);
709 BUG_ON(dst == 0);
711 dst[0] = KP_PING_ALL_CONNS;
713 list_add_tail(&(cm->lh), &(cr->msgs));
715 return 1;
718 static int add_set_max_cmsg_dly(struct sk_buff *skb, struct control_retrans *cr,
719 struct control_msg_out *cm, int spaceleft)
721 char *dst;
723 if (unlikely(spaceleft < 5))
724 return 0;
726 dst = skb_put(skb, 5);
727 BUG_ON(dst == 0);
729 dst[0] = KP_SET_MAX_CMSG_DELAY;
730 put_u32(dst + 1, cm->msg.set_max_cmsg_delay.delay, 1);
732 list_add_tail(&(cm->lh), &(cr->msgs));
734 return 5;
737 static int add_message(struct sk_buff *skb, struct control_retrans *cr,
738 struct control_msg_out *cm, int spaceleft,
739 struct control_msg_out **split_conndata, __u32 *sc_sendlen)
741 BUG_ON(split_conndata != 0 && *split_conndata != 0);
742 BUG_ON(sc_sendlen != 0 && *sc_sendlen != 0);
744 switch (cm->type) {
745 case MSGTYPE_ACK:
746 return add_ack(skb, cr, cm, spaceleft);
747 case MSGTYPE_ACK_CONN:
748 return add_ack_conn(skb, cr, cm, spaceleft);
749 case MSGTYPE_PONG:
750 return add_pong(skb, cr, cm, spaceleft);
751 case MSGTYPE_CONNECT:
752 return add_connect(skb, cr, cm, spaceleft);
753 case MSGTYPE_CONNECT_SUCCESS:
754 return add_connect_success(skb, cr, cm, spaceleft);
755 case MSGTYPE_RESET_CONN:
756 return add_reset_conn(skb, cr, cm, spaceleft);
757 case MSGTYPE_CONNDATA:
758 return add_conndata(skb, cr, cm, spaceleft, split_conndata,
759 sc_sendlen);
760 case MSGTYPE_CONNID_UNKNOWN:
761 return add_connid_unknown(skb, cr, cm, spaceleft);
762 case MSGTYPE_PING_ALL_CONNS:
763 return add_ping_all_conns(skb, cr, cm, spaceleft);
764 case MSGTYPE_SET_MAX_CMSG_DELAY:
765 return add_set_max_cmsg_dly(skb, cr, cm, spaceleft);
766 default:
767 BUG();
769 BUG();
770 return 0;
773 static __u32 recount_ping_conns(struct neighbor *nb)
775 __u32 cnt;
776 struct list_head *curr = nb->next_ping_conn->target.out.nb_list.next;
777 while (curr != &(nb->snd_conn_list)) {
778 cnt++;
779 BUG_ON(cnt > 1000000000);
781 return cnt;
784 static __u32 __send_messages_pc(struct neighbor *nb, struct sk_buff *skb,
785 struct control_retrans *cr, int spaceleft)
787 __u32 length = 0;
788 mutex_lock(&(nb->conn_list_lock));
789 while (nb->next_ping_conn != 0) {
790 struct conn *rconn;
791 struct conn *sconn;
792 struct list_head *next;
793 struct control_msg_out *cm;
794 int rc;
796 rconn = nb->next_ping_conn;
797 sconn = rconn->reversedir;
799 BUG_ON(rconn->targettype != TARGET_OUT);
800 BUG_ON(sconn->sourcetype != SOURCE_IN);
802 if (unlikely(rconn->target.out.conn_id == 0))
803 goto next;
805 if (nb->ping_conns_remaining == 0) {
806 atomic_set(&(sconn->source.in.pong_awaiting), 1);
807 nb->pong_conns_expected++;
808 nb->ping_conns_remaining--;
809 if (unlikely(nb->ping_conns_remaining == 0))
810 nb->ping_conns_remaining =
811 recount_ping_conns(nb);
812 } else {
813 if (likely(atomic_read(&(
814 sconn->source.in.pong_awaiting)) == 0))
815 goto next;
816 nb->ping_conns_remaining--;
817 if (unlikely(nb->ping_conns_retrans_remaining == 0))
818 nb->ping_conns_retrans_remaining =
819 recount_ping_conns(nb);
822 cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);
823 cm->type = MSGTYPE_ACK_CONN;
824 BUG_ON(rconn->reversedir->sourcetype != SOURCE_IN);
825 kref_get(&(rconn->reversedir->ref));
826 cm->msg.ack_conn.rconn = rconn->reversedir;
827 cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_PING;
828 cm->msg.ack_conn.conn_id = rconn->target.out.conn_id;
829 cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
831 rc = add_message(skb, cr, cm, spaceleft - length, 0, 0);
832 if (rc == 0)
833 break;
835 length = rc;
836 next:
837 next = rconn->target.out.nb_list.next;
838 nb->next_ping_conn = container_of(next, struct conn,
839 target.out.nb_list);
840 if (next == &(nb->snd_conn_list)) {
841 nb->next_ping_conn = 0;
842 nb->ping_conns_remaining = 0;
845 if (unlikely(length != 0)) {
846 nb->ping_conn_completed = jiffies;
848 mutex_unlock(&(nb->conn_list_lock));
849 return length;
/* Drain the neighbor's control message queues into the packet, urgent
 * messages first; non-urgent messages only when urgentonly == 0.  A message
 * that does not fit is pushed back to the FRONT of its queue and the loop
 * stops.  Returns the bytes appended.
 * Locking: caller holds nb->cmsg_lock (see _send_messages()); it is
 * dropped around add_message() because the encoders take other locks. */
static __u32 __send_messages(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft, int urgentonly,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	__u32 length = 0;
	while (!list_empty(&(nb->ucontrol_msgs_out)) || (!urgentonly &&
			!list_empty(&(nb->control_msgs_out)))) {
		int rc;

		int urgent = !list_empty(&(nb->ucontrol_msgs_out));

		struct control_msg_out *cm;

		if (urgent)
			cm = container_of(nb->ucontrol_msgs_out.next,
					struct control_msg_out, lh);
		else
			cm = container_of(nb->control_msgs_out.next,
					struct control_msg_out, lh);

		list_del(&(cm->lh));
		if (urgent)
			nb->ucmlength -= cm->length;
		else
			nb->cmlength -= cm->length;
		mutex_unlock(&(nb->cmsg_lock));
		rc = add_message(skb, cr, cm, spaceleft - length,
				split_conndata, sc_sendlen);
		mutex_lock(&(nb->cmsg_lock));

		if (rc == 0) {
			/* did not fit: put it back where it was */
			if (urgent) {
				list_add(&(cm->lh), &(nb->ucontrol_msgs_out));
				nb->ucmlength += cm->length;
			} else {
				list_add(&(cm->lh), &(nb->control_msgs_out));
				nb->cmlength += cm->length;
			}
			break;
		}

		length += rc;
	}

	return length;
}
899 static int msgtype_present(struct neighbor *nb, __u8 type)
901 struct list_head *curr;
903 curr = nb->control_msgs_out.next;
904 while (curr != &(nb->control_msgs_out)) {
905 struct control_msg_out *cm = container_of(curr,
906 struct control_msg_out, lh);
908 if (cm->type == MSGTYPE_PING_ALL_CONNS)
909 return 1;
911 curr = curr->next;
914 return 0;
917 static int ping_all_conns_needed(struct neighbor *nb)
919 if (likely(nb->ping_all_conns == 0))
920 return 0;
922 if (msgtype_present(nb, MSGTYPE_PING_ALL_CONNS))
923 return 0;
925 return 1;
/* Build and append a KP_SET_MAX_CMSG_DELAY message announcing our batching
 * delay (CMSG_INTERVAL_MS, in 1/10 ms units).  Returns bytes appended.
 * NOTE(review): max_cmsg_delay_sent is set even when add_message() returns
 * 0 (no space), and in that case the built cm is neither queued nor freed -
 * looks like a leak and a lost announcement; verify against upstream. */
static int __send_messages_smcd(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft)
{
	struct control_msg_out *cm;
	int rc;

	cm = alloc_control_msg(nb, ACM_PRIORITY_MED);

	if (unlikely(cm == 0))
		return 0;

	cm->type = MSGTYPE_SET_MAX_CMSG_DELAY;
	cm->msg.set_max_cmsg_delay.delay = CMSG_INTERVAL_MS * 10;
	cm->length = 5;

	rc = add_message(skb, cr, cm, spaceleft, 0, 0);

	nb->max_cmsg_delay_sent = 1;

	return rc;
}
/* Build and append a KP_PING_ALL_CONNS message.  Returns bytes appended.
 * NOTE(review): ping_all_conns is cleared even when add_message() returns
 * 0 (no space), and the built cm is then neither queued nor freed - same
 * suspected leak pattern as __send_messages_smcd(); verify upstream. */
static int __send_messages_pac(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft)
{
	struct control_msg_out *cm;
	int rc;

	cm = alloc_control_msg(nb, ACM_PRIORITY_MED);

	if (unlikely(cm == 0))
		return 0;

	cm->type = MSGTYPE_PING_ALL_CONNS;
	cm->length = 1;

	rc = add_message(skb, cr, cm, spaceleft, 0, 0);

	nb->ping_all_conns = 0;
	return rc;
}
/* Fill one kernel packet (optional ping, then special messages, then the
 * queued control messages, then per-conn pings), pad it to "spaceleft" and
 * transmit it.  Returns dev_queue_xmit()'s result.
 * On xmit failure everything is re-queued; on success, conndata messages
 * are freed immediately (conn data has its own retransmit machinery) and
 * the rest is scheduled for retransmission via cr. */
static int _send_messages(struct neighbor *nb, struct sk_buff *skb, int ping,
		struct control_retrans *cr, int spaceleft, int urgentonly)
{
	int rc;
	int length = 0;
	__u32 pingcookie = 0;
	struct control_msg_out *split_conndata = 0;
	__u32 sc_sendlen = 0;

	mutex_lock(&(nb->cmsg_lock));

	if (ping != 0) {
		int rc; /* shadows outer rc intentionally - local to the ping */
		pingcookie = add_ping_req(nb);
		rc = add_ping(skb, pingcookie, spaceleft - length);
		BUG_ON(rc == 0); /* caller reserved room for the ping */
		length += rc;
	}

	if (likely(urgentonly == 0) && unlikely(ping_all_conns_needed(nb) != 0))
		length += __send_messages_pac(nb, skb, cr, spaceleft - length);

	if (likely(urgentonly == 0) && unlikely(nb->max_cmsg_delay_sent == 0))
		length += __send_messages_smcd(nb, skb, cr, spaceleft - length);

	length += __send_messages(nb, skb, cr, spaceleft - length, urgentonly,
			&split_conndata, &sc_sendlen);

	if (likely(urgentonly == 0))
		length += __send_messages_pc(nb, skb, cr, spaceleft - length);

	mutex_unlock(&(nb->cmsg_lock));

	if (unlikely(length > spaceleft))
		printk(KERN_ERR "error cor/kpacket_gen: length > spaceleft!?");

	padding(skb, spaceleft - length);

	rc = dev_queue_xmit(skb);

	if (rc != 0) {
		/* transmit failed: forget the ping and re-queue every
		 * message (iterating msgs.prev preserves original order
		 * since add_control_msg re-adds urgent msgs to the front) */
		unadd_ping_req(nb, pingcookie);

		while (list_empty(&(cr->msgs)) == 0) {
			struct control_msg_out *cm = container_of(cr->msgs.prev,
					struct control_msg_out, lh);
			list_del(&(cm->lh));
			add_control_msg(cm, 1);
		}

		if (split_conndata != 0) {
			add_control_msg(split_conndata, 1);
		}

		kref_put(&(cr->ref), free_control_retrans);
	} else {
		/* sent: conndata is not retransmitted from here - free it */
		struct list_head *curr = cr->msgs.next;

		while(curr != &(cr->msgs)) {
			struct control_msg_out *cm = container_of(curr,
					struct control_msg_out, lh);

			curr = curr->next; /* advance before list_del */

			if (cm->type == MSGTYPE_CONNDATA) {
				list_del(&(cm->lh));
				kfree(cm->msg.conn_data.data_orig);
				free_control_msg(cm);
			}
		}

		/* partially-sent conndata: advance past the sent bytes and
		 * hand the remainder back to the conndata path */
		if (split_conndata != 0) {
			BUG_ON(sc_sendlen == 0);
			BUG_ON(sc_sendlen >=
					split_conndata->msg.conn_data.datalen);

			split_conndata->msg.conn_data.data += sc_sendlen;
			split_conndata->msg.conn_data.datalen -= sc_sendlen;

			send_conndata(split_conndata,
					split_conndata->msg.conn_data.conn_id,
					split_conndata->msg.conn_data.seqno,
					split_conndata->msg.conn_data.data_orig,
					split_conndata->msg.conn_data.data,
					split_conndata->msg.conn_data.datalen);
		}

		if (list_empty(&(cr->msgs)))
			kref_put(&(cr->ref), free_control_retrans);
		else
			schedule_retransmit(cr, nb);
	}

	return rc;
}
/* Estimate the total bytes of control traffic currently pending for this
 * neighbor (urgent + optionally normal queues, per-conn pings at ~5 bytes
 * each, pending special messages, and the ping itself).  Also refreshes
 * ping_conns_retrans_remaining as a side effect.  Caller holds
 * nb->cmsg_lock (called from send_messages()). */
static __u32 get_total_messages_length(struct neighbor *nb, int ping,
		int urgentonly)
{
	__u32 length = nb->ucmlength;

	if (likely(urgentonly == 0)) {
		length += nb->cmlength + nb->ping_conns_remaining * 5;
		if (likely(nb->ping_conns_remaining == 0)) {
			/* first ping round done: if pongs are outstanding and
			 * the timeout window has not passed, schedule a
			 * retransmit round for the still-awaiting conns */
			if (likely(nb->ping_conns_retrans_remaining == 0) &&
					unlikely(nb->pong_conns_expected !=0) &&
					time_before(nb->ping_conn_completed,
					jiffies + msecs_to_jiffies(
					PING_ALL_CONNS_TIMEOUT) +
					usecs_to_jiffies(((__u32) atomic_read(&(
					nb->latency))) * 2 + ((__u32)
					atomic_read(&(nb->max_remote_cmsg_delay)
					)))))
				nb->ping_conns_retrans_remaining =
						nb->pong_conns_expected;

			if (unlikely(nb->ping_conns_retrans_remaining >
					nb->pong_conns_expected))
				nb->ping_conns_retrans_remaining =
						nb->pong_conns_expected;
		}

		length += nb->ping_conns_retrans_remaining * 5;
	}

	if (unlikely(ping_all_conns_needed(nb) != 0))
		length += 1;
	if (unlikely(nb->max_cmsg_delay_sent == 0))
		length += 5;

	/* ping == 2 forces a ping; ping == 1 piggybacks on other traffic */
	if (ping == 2 || (length > 0 && ping != 0))
		length += 5;

	return length;
}
/* Main control-packet sender for a neighbor.  Repeatedly builds packets of
 * up to one MSS until either nothing is pending or (with allmsgs == 0) the
 * pending data would not fill a packet.  Only urgent messages are sent to
 * neighbors that are not ACTIVE.  On transmit failure the work is queued
 * on the qos layer for resume; returns the last transmit result. */
int send_messages(struct neighbor *nb, int allmsgs, int resume)
{
	int rc = 0;
	int ping;
	int targetmss = mss(nb);

	int nbstate = get_neigh_state(nb);
	int urgentonly = (nbstate != NEIGHBOR_STATE_ACTIVE);

	mutex_lock(&(nb->cmsg_lock));

	if (resume)
		allmsgs = nb->kp_allmsgs;

	ping = time_to_send_ping(nb);

	while (1) {
		__u32 length;

		__u32 seqno;
		struct sk_buff *skb;
		struct control_retrans *cr;

		/* queue lengths and list emptiness must agree */
		BUG_ON(list_empty(&(nb->control_msgs_out)) &&
				(nb->cmlength != 0));
		BUG_ON((list_empty(&(nb->control_msgs_out)) == 0) &&
				(nb->cmlength == 0));
		BUG_ON(list_empty(&(nb->ucontrol_msgs_out)) &&
				(nb->ucmlength != 0));
		BUG_ON((list_empty(&(nb->ucontrol_msgs_out)) == 0) &&
				(nb->ucmlength == 0));
		BUG_ON(nb->cmlength < 0);
		BUG_ON(nb->ucmlength < 0);

		length = get_total_messages_length(nb, ping, urgentonly);

		if (length == 0)
			break;

		/* without allmsgs, wait until a full packet can be built */
		if (length < targetmss && allmsgs == 0)
			break;

		seqno = atomic_add_return(1, &(nb->kpacket_seqno));

		if (length > targetmss)
			length = targetmss;

		mutex_unlock(&(nb->cmsg_lock));
		skb = create_packet(nb, length, GFP_KERNEL, 0, seqno);
		if (unlikely(skb == 0)) {
			printk(KERN_ERR "cor: send_messages: cannot allocate "
					"skb (out of memory?)");
			goto oom;
		}

		cr = kmem_cache_alloc(controlretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree_skb(skb);
			printk(KERN_ERR "cor: send_messages: cannot allocate "
					"control_retrans (out of memory?)");
			goto oom;
		}
		memset(cr, 0, sizeof(struct control_retrans));
		kref_init(&(cr->ref));
		cr->nb = nb;
		cr->seqno = seqno;
		INIT_LIST_HEAD(&(cr->msgs));

		rc = _send_messages(nb, skb, ping, cr, length, urgentonly);
		ping = 0; /* at most one ping per call */

		mutex_lock(&(nb->cmsg_lock));

		if (rc != 0)
			break;
	}

	if (0) {
oom:
		/* OOM paths arrive without the lock; re-take it so the
		 * common exit path below is uniform */
		mutex_lock(&(nb->cmsg_lock));
	}

	if (rc != 0) {
		if (resume == 0) {
			/* remember the mode and let the qos layer retry */
			nb->kp_allmsgs = nb->kp_allmsgs || allmsgs;
			qos_enqueue(nb->dev, &(nb->rb_kp), QOS_CALLER_KPACKET);
		} else if (allmsgs) {
			nb->kp_allmsgs = 0;
		}
	}

	mutex_unlock(&(nb->cmsg_lock));

	if (allmsgs)
		schedule_controlmsg_timerfunc(nb);

	return rc;
}
/* Delayed-work handler for nb->cmsg_timer.  If it fired early (timeout was
 * pushed out meanwhile), re-arm and return; otherwise flush all pending
 * control messages and drop the reference schedule_controlmsg_timerfunc()
 * took for the timer. */
static void controlmsg_timerfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, cmsg_timer);
	unsigned long jiffies_tmp = jiffies;

	mutex_lock(&(nb->cmsg_lock));

	if (time_after(nb->timeout, jiffies_tmp)) {
		INIT_DELAYED_WORK(&(nb->cmsg_timer), controlmsg_timerfunc);
		schedule_delayed_work(&(nb->cmsg_timer), nb->timeout -
				jiffies_tmp);
		mutex_unlock(&(nb->cmsg_lock));
		return; /* nb ref stays with the re-armed timer */
	}

	mutex_unlock(&(nb->cmsg_lock));

	send_messages(nb, 1, 0);
	kref_put(&(nb->ref), neighbor_free);
}
/* Arm the periodic control message timer, advancing nb->timeout by one
 * CMSG_INTERVAL_MS and taking a neighbor reference for the timer (dropped
 * in controlmsg_timerfunc).  Does nothing for killed neighbors. */
void schedule_controlmsg_timerfunc(struct neighbor *nb)
{
	/* NOTE(review): deliberately(?) shadows the global jiffies with a
	 * 64-bit snapshot; nb->timeout arithmetic below uses this value */
	__u64 jiffies = get_jiffies_64();
	long long delay;

	int state = get_neigh_state(nb);

	if (unlikely(state == NEIGHBOR_STATE_KILLED))
		return;

	mutex_lock(&(nb->cmsg_lock));
	nb->timeout += msecs_to_jiffies(CMSG_INTERVAL_MS);

	delay = nb->timeout - jiffies;
	if (delay < 0) {
		/* we are behind schedule: fire asap and resync the base */
		delay = 1;
		nb->timeout = jiffies;
	}

	INIT_DELAYED_WORK(&(nb->cmsg_timer), controlmsg_timerfunc);
	schedule_delayed_work(&(nb->cmsg_timer), delay);
	mutex_unlock(&(nb->cmsg_lock));
	kref_get(&(nb->ref)); /* for the timer; put in controlmsg_timerfunc */
}
1253 static void free_oldest_ucm(struct neighbor *nb)
1255 struct control_msg_out *cm = container_of(nb->ucontrol_msgs_out.next,
1256 struct control_msg_out, lh);
1258 BUG_ON(list_empty(&(nb->ucontrol_msgs_out)));
1259 BUG_ON(isurgent(cm) == 0);
1261 list_del(&(cm->lh));
1262 nb->ucmlength -= cm->length;
1263 atomic_dec(&(nb->ucmcnt));
1264 free_control_msg(cm);
/* Enqueue a control message on its neighbor.  Urgent messages (pong/ack)
 * go on the urgent queue with their own count: on overflow, fresh messages
 * evict the oldest, while retransmitted ones are dropped instead.  When
 * enough bytes are pending to fill a packet, a send is kicked off
 * immediately. */
static void add_control_msg(struct control_msg_out *cm, int retrans)
{
	int nbstate;

	BUG_ON(cm->nb == 0);

	nbstate = get_neigh_state(cm->nb);

	BUG_ON(cm == 0);
	/* must not already be on a list (poisoned by __alloc_control_msg) */
	BUG_ON(cm->lh.next != LIST_POISON1 || cm->lh.prev != LIST_POISON2);

	mutex_lock(&(cm->nb->cmsg_lock));

	if (isurgent(cm)) {
		long msgs;

		msgs = atomic_inc_return(&(cm->nb->ucmcnt));
		BUG_ON(msgs <= 0);

		if (unlikely(retrans)) {
			/* retransmits are expendable: drop on overflow */
			if (msgs > MAX_URGENT_CMSGS_PER_NEIGH_RETRANSALLOW ||
					msgs > MAX_URGENT_CMSGS_PER_NEIGH) {
				atomic_dec(&(cm->nb->ucmcnt));
				free_control_msg(cm);
				goto out;
			}

			/* re-add at the front to preserve original order */
			cm->nb->ucmlength += cm->length;
			list_add(&(cm->lh), &(cm->nb->ucontrol_msgs_out));
		} else {
			/* fresh urgent msg: evict the oldest on overflow */
			if (msgs > MAX_URGENT_CMSGS_PER_NEIGH) {
				free_oldest_ucm(cm->nb);
			}

			cm->nb->ucmlength += cm->length;
			list_add_tail(&(cm->lh), &(cm->nb->ucontrol_msgs_out));
		}
	} else {
		cm->nb->cmlength += cm->length;
		list_add_tail(&(cm->lh), &(cm->nb->control_msgs_out));
	}

#warning todo measure inter message interval
	/* non-urgent bytes only count toward the threshold when the
	 * neighbor is active (otherwise only urgent msgs would be sent) */
	if (unlikely((nbstate == NEIGHBOR_STATE_ACTIVE ? cm->nb->cmlength : 0) +
			cm->nb->ucmlength >= mss(cm->nb)))
		send_messages(cm->nb, 0, 0);

out:
	mutex_unlock(&(cm->nb->cmsg_lock));
}
1318 void send_pong(struct neighbor *nb, __u32 cookie)
1320 struct control_msg_out *cm = _alloc_control_msg(nb, 0, 1);
1322 if (unlikely(cm == 0))
1323 return;
1325 cm->nb = nb;
1326 cm->type = MSGTYPE_PONG;
1327 cm->msg.pong.cookie = cookie;
1328 cm->msg.pong.time_enqueued = jiffies;
1329 cm->length = 9;
1330 add_control_msg(cm, 0);
1333 void send_reset_conn(struct control_msg_out *cm, __u32 conn_id)
1335 cm->type = MSGTYPE_RESET_CONN;
1336 cm->msg.reset.conn_id = conn_id;
1337 cm->length = 5;
1338 add_control_msg(cm, 0);
1341 void send_ack(struct neighbor *nb, __u32 seqno)
1343 struct control_msg_out *cm = _alloc_control_msg(nb, 0, 1);
1345 if (unlikely(cm == 0))
1346 return;
1348 cm->nb = nb;
1349 cm->type = MSGTYPE_ACK;
1350 cm->msg.ack.seqno = seqno;
1351 cm->length = 5;
1352 add_control_msg(cm, 0);
1355 #warning todo conn naming/locking
1356 void send_ack_conn(struct control_msg_out *cm, struct conn *rconn,
1357 __u32 conn_id, __u32 seqno)
1359 cm->type = MSGTYPE_ACK_CONN;
1360 kref_get(&(rconn->ref));
1361 BUG_ON(rconn->sourcetype != SOURCE_IN);
1362 cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_SEQNO |
1363 KP_ACK_CONN_FLAGS_WINDOW;
1364 cm->msg.ack_conn.rconn = rconn;
1365 cm->msg.ack_conn.conn_id = conn_id;
1366 cm->msg.ack_conn.seqno = seqno;
1367 cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
1368 add_control_msg(cm, 0);
1371 void send_ack_conn_ooo(struct control_msg_out *cm, struct conn *rconn,
1372 __u32 conn_id, __u32 seqno_ooo, __u32 length)
1374 cm->type = MSGTYPE_ACK_CONN;
1375 kref_get(&(rconn->ref));
1376 BUG_ON(rconn->sourcetype != SOURCE_IN);
1377 cm->msg.ack_conn.flags = ooolen_to_flags(length);
1378 cm->msg.ack_conn.rconn = rconn;
1379 cm->msg.ack_conn.conn_id = conn_id;
1380 cm->msg.ack_conn.seqno_ooo = seqno_ooo;
1381 cm->msg.ack_conn.length = length;
1382 cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
1383 add_control_msg(cm, 0);
1386 void send_decaytime(struct conn *rconn, int force, __u16 decaytime)
1388 struct control_msg_out *cm;
1390 #warning todo unforced send
1391 if (force == 0)
1392 return;
1394 cm = alloc_control_msg(rconn->target.out.nb, ACM_PRIORITY_MED);
1396 if (cm == 0)
1397 return;
1399 cm->type = MSGTYPE_ACK_CONN;
1400 kref_get(&(rconn->ref));
1401 BUG_ON(rconn->targettype != TARGET_OUT);
1402 cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_CREDITS;
1403 cm->msg.ack_conn.rconn = rconn->reversedir;
1404 cm->msg.ack_conn.conn_id = rconn->target.out.conn_id;
1405 cm->msg.ack_conn.decaytime_seqno = rconn->target.out.decaytime_seqno;
1406 cm->msg.ack_conn.decaytime = decaytime;
1408 cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
1409 add_control_msg(cm, 0);
1411 rconn->target.out.decaytime_last = decaytime;
1412 rconn->target.out.decaytime_seqno = (rconn->target.out.decaytime_seqno +
1413 1) % 64;
1414 rconn->target.out.decaytime_send_allowed = 0;
1417 void send_connect_success(struct control_msg_out *cm, __u32 rcvd_conn_id,
1418 __u32 gen_conn_id, __u32 init_seqno, struct conn *rconn)
1420 cm->type = MSGTYPE_CONNECT_SUCCESS;
1421 cm->msg.connect_success.rcvd_conn_id = rcvd_conn_id;
1422 cm->msg.connect_success.gen_conn_id = gen_conn_id;
1423 cm->msg.connect_success.init_seqno = init_seqno;
1424 kref_get(&(rconn->ref));
1425 BUG_ON(rconn->sourcetype != SOURCE_IN);
1426 cm->msg.connect_success.rconn = rconn;
1427 cm->length = 16;
1428 add_control_msg(cm, 0);
1431 void send_connect_nb(struct control_msg_out *cm, __u32 conn_id,
1432 __u32 init_seqno, struct conn *sconn)
1434 cm->type = MSGTYPE_CONNECT;
1435 cm->msg.connect.conn_id = conn_id;
1436 cm->msg.connect.init_seqno = init_seqno;
1437 kref_get(&(sconn->ref));
1438 BUG_ON(sconn->sourcetype != SOURCE_IN);
1439 cm->msg.connect.sconn = sconn;
1440 cm->length = 12;
1441 add_control_msg(cm, 0);
1444 void send_conndata(struct control_msg_out *cm, __u32 conn_id, __u32 seqno,
1445 char *data_orig, char *data, __u32 datalen)
1447 cm->type = MSGTYPE_CONNDATA;
1448 cm->msg.conn_data.conn_id = conn_id;
1449 cm->msg.conn_data.seqno = seqno;
1450 cm->msg.conn_data.data_orig = data_orig;
1451 cm->msg.conn_data.data = data;
1452 cm->msg.conn_data.datalen = datalen;
1453 cm->length = 11 + datalen;
1454 add_control_msg(cm, 0);
1457 void send_connid_unknown(struct neighbor *nb, __u32 conn_id)
1459 struct control_msg_out *cm = alloc_control_msg(nb, ACM_PRIORITY_MED);
1460 if (unlikely(cm == 0))
1461 return;
1462 cm->type = MSGTYPE_CONNID_UNKNOWN;
1463 cm->msg.connid_unknown.conn_id = conn_id;
1464 cm->length = 5;
1465 add_control_msg(cm, 0);
1468 #warning todo remove ping_all_conns and use timeout instead, change conn_list_lock to a spinlock and remove rcv_conn_list+num_send_conns
1469 void send_ping_all_conns(struct neighbor *nb)
1471 mutex_lock(&(nb->cmsg_lock));
1472 nb->ping_all_conns = 1;
1473 mutex_unlock(&(nb->cmsg_lock));
1478 static int matches_connretrans(void *htentry, void *searcheditem)
1480 struct control_retrans *cr = (struct control_retrans *) htentry;
1481 struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
1482 searcheditem;
1484 return rm->nb == cr->nb && rm->seqno == cr->seqno;
1487 void __init cor_kgen_init(void)
1489 controlmsg_slab = kmem_cache_create("cor_controlmsg",
1490 sizeof(struct control_msg_out), 8, 0, 0);
1491 controlretrans_slab = kmem_cache_create("cor_controlretransmsg",
1492 sizeof(struct control_retrans), 8, 0, 0);
1493 htable_init(&retransmits, matches_connretrans,
1494 offsetof(struct control_retrans, htab_entry),
1495 offsetof(struct control_retrans, ref));
1498 MODULE_LICENSE("GPL");