/*
 * net/cor/kpacket_gen.c -- kernel-packet (control message) generation for the
 * Connection Oriented Routing (cor) module.
 *
 * (Commit note: shorter ping interval a few seconds after a neighbor gets
 * idle, more accurate latency measurement.)
 */
1 /**
2 * Connection oriented routing
3 * Copyright (C) 2007-2011 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include <asm/byteorder.h>
23 #include "cor.h"
25 /* not sent over the network - internal meaning only */
26 #define MSGTYPE_PONG 1
27 #define MSGTYPE_ACK 2
28 #define MSGTYPE_ACK_CONN 3
29 #define MSGTYPE_CONNECT 4
30 #define MSGTYPE_CONNECT_SUCCESS 5
31 #define MSGTYPE_RESET_CONN 6
32 #define MSGTYPE_CONNDATA 7
33 #define MSGTYPE_CONNID_UNKNOWN 8
34 #define MSGTYPE_SET_MAX_CMSG_DELAY 9
36 #define MSGTYPE_PONG_TIMEENQUEUED 1
37 #define MSGTYPE_PONG_RESPDELAY 2
39 struct control_msg_out{
40 struct list_head lh; /* either neighbor or control_retrans_packet */
41 struct neighbor *nb;
43 struct kref ref;
45 unsigned long timeout;
47 __u32 length;
49 __u8 type;
50 union{
51 struct{
53 __u32 cookie;
54 __u8 type;
56 union {
57 ktime_t time_enqueued;
58 __u32 respdelay;
59 } delaycomp;
60 }pong;
62 struct{
63 __u32 seqno;
64 }ack;
66 struct{
67 struct conn *rconn;
68 __u32 conn_id;
69 __u32 seqno;
70 __u32 seqno_ooo;
71 __u32 length;
73 __u8 decaytime_seqno;
74 __u16 decaytime;
76 __u8 flags;
77 }ack_conn;
79 struct{
80 __u32 conn_id;
81 __u32 init_seqno;
82 struct conn *sconn;
83 }connect;
85 struct{
86 __u32 rcvd_conn_id;
87 __u32 gen_conn_id;
88 __u32 init_seqno;
89 struct conn *rconn;
90 }connect_success;
92 struct{
93 struct htab_entry htab_entry;
94 __u32 conn_id_reset;
95 __u32 conn_id_unknown;
96 }reset_connidunknown;
98 struct{
99 __u32 conn_id;
100 __u32 seqno;
101 char *data_orig;
102 char *data;
103 __u32 datalen;
104 }conn_data;
106 struct{
107 __u32 delay;
108 }set_max_cmsg_delay;
109 }msg;
112 struct control_retrans {
113 struct kref ref;
115 struct neighbor *nb;
116 __u32 seqno;
118 unsigned long timeout;
120 struct list_head msgs;
122 struct htab_entry htab_entry;
123 struct list_head timeout_list;
126 struct unknownconnid_matchparam {
127 struct neighbor *nb;
128 __u32 conn_id;
131 struct retransmit_matchparam {
132 struct neighbor *nb;
133 __u32 seqno;
137 struct kmem_cache *controlmsg_slab;
138 struct kmem_cache *controlretrans_slab;
140 static struct htable retransmits;
142 DEFINE_SPINLOCK(unknown_connids_lock);
143 static struct htable unknown_connids;
145 atomic_t cmcnt = ATOMIC_INIT(0);
148 static void add_control_msg(struct control_msg_out *msg, int retrans);
151 static __u32 ucm_to_key(struct unknownconnid_matchparam *ucm)
153 return ((__u32)((long) ucm->nb)) ^ ucm->conn_id;
156 static __u32 rm_to_key(struct retransmit_matchparam *rm)
158 return ((__u32)((long) rm->nb)) ^ rm->seqno;
161 static inline int isurgent(struct control_msg_out *cm)
163 if (unlikely(cm->type == MSGTYPE_PONG || cm->type == MSGTYPE_ACK))
164 return 1;
165 return 0;
168 static struct control_msg_out *__alloc_control_msg(void)
170 struct control_msg_out *cm = kmem_cache_alloc(controlmsg_slab,
171 GFP_KERNEL);
172 if (unlikely(cm == 0))
173 return 0;
174 memset(cm, 0, sizeof(struct control_msg_out));
175 cm->lh.next = LIST_POISON1;
176 cm->lh.prev = LIST_POISON2;
177 kref_init(&(cm->ref));
178 return cm;
181 static int calc_limit(int limit, int priority)
183 if (priority == ACM_PRIORITY_LOW)
184 return (limit+2)/3;
185 else if (priority == ACM_PRIORITY_MED)
186 return (limit * 2 + 1)/3;
187 else if (priority == ACM_PRIORITY_HIGH)
188 return limit;
189 else
190 BUG();
193 int may_alloc_control_msg(struct neighbor *nb, int priority)
195 long packets1 = atomic_read(&(nb->cmcnt));
196 long packets2 = atomic_read(&(cmcnt));
198 BUG_ON(packets1 < 0);
199 BUG_ON(packets2 < 0);
201 if (packets1 < calc_limit(GUARANTEED_CMSGS_PER_NEIGH, priority))
202 return 1;
204 if (unlikely(unlikely(packets2 >= calc_limit(MAX_CMSGS_PER_NEIGH,
205 priority)) || unlikely(packets1 >= (
206 calc_limit(MAX_CMSGS_PER_NEIGH, priority) *
207 (MAX_CMSGS - packets2) / MAX_CMSGS))))
208 return 0;
209 return 1;
212 static struct control_msg_out *_alloc_control_msg(struct neighbor *nb,
213 int priority, int urgent)
215 struct control_msg_out *cm = 0;
217 BUG_ON(nb == 0);
219 if (urgent == 0) {
220 long packets1 = atomic_inc_return(&(nb->cmcnt));
221 long packets2 = atomic_inc_return(&(cmcnt));
223 BUG_ON(packets1 <= 0);
224 BUG_ON(packets2 <= 0);
226 if (packets1 <= calc_limit(GUARANTEED_CMSGS_PER_NEIGH,
227 priority))
228 goto alloc;
230 if (unlikely(unlikely(packets2 > calc_limit(MAX_CMSGS_PER_NEIGH,
231 priority)) || unlikely(packets1 > (
232 calc_limit(MAX_CMSGS_PER_NEIGH, priority) *
233 (MAX_CMSGS - packets2) / MAX_CMSGS))))
234 goto full;
237 alloc:
238 cm = __alloc_control_msg();
239 if (unlikely(cm == 0))
240 goto full;
241 cm->nb = nb;
243 if (0) {
244 full:
245 if (urgent == 0) {
246 atomic_dec(&(nb->cmcnt));
247 atomic_dec(&(cmcnt));
250 return cm;
253 struct control_msg_out *alloc_control_msg(struct neighbor *nb, int priority)
255 return _alloc_control_msg(nb, priority, 0);
258 static void cmsg_kref_free(struct kref *ref)
260 struct control_msg_out *cm = container_of(ref, struct control_msg_out,
261 ref);
262 kmem_cache_free(controlmsg_slab, cm);
265 void free_control_msg(struct control_msg_out *cm)
267 if (isurgent(cm) == 0) {
268 atomic_dec(&(cm->nb->cmcnt));
269 atomic_dec(&(cmcnt));
272 if (cm->type == MSGTYPE_ACK_CONN) {
273 struct conn *sconn = cm->msg.ack_conn.rconn->reversedir;
274 BUG_ON(cm->msg.ack_conn.rconn == 0);
275 BUG_ON(sconn->targettype != TARGET_OUT);
276 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_CREDITS) != 0 &&
277 sconn->target.out.decaytime_send_allowed != 0) {
278 sconn->target.out.decaytime_send_allowed = 0;
279 refresh_conn_credits(sconn, 0, 0);
282 kref_put(&(cm->msg.ack_conn.rconn->ref), free_conn);
283 cm->msg.ack_conn.rconn = 0;
284 } else if (cm->type == MSGTYPE_CONNECT) {
285 BUG_ON(cm->msg.connect.sconn == 0);
286 kref_put(&(cm->msg.connect.sconn->ref), free_conn);
287 cm->msg.connect.sconn = 0;
288 } else if (cm->type == MSGTYPE_CONNECT_SUCCESS) {
289 BUG_ON(cm->msg.connect_success.rconn == 0);
290 kref_put(&(cm->msg.connect_success.rconn->ref), free_conn);
291 cm->msg.connect_success.rconn = 0;
292 } else if (cm->type == MSGTYPE_RESET_CONN ||
293 cm->type == MSGTYPE_CONNID_UNKNOWN) {
294 struct unknownconnid_matchparam ucm;
296 ucm.nb = cm->nb;
297 ucm.conn_id = cm->msg.reset_connidunknown.conn_id_unknown;
299 htable_delete(&unknown_connids, ucm_to_key(&ucm), &ucm,
300 cmsg_kref_free);
303 kref_put(&(cm->ref), cmsg_kref_free);
306 static void free_control_retrans(struct kref *ref)
308 struct control_retrans *cr = container_of(ref, struct control_retrans,
309 ref);
311 while (list_empty(&(cr->msgs)) == 0) {
312 struct control_msg_out *cm = container_of(cr->msgs.next,
313 struct control_msg_out, lh);
314 list_del(&(cm->lh));
315 free_control_msg(cm);
318 kmem_cache_free(controlretrans_slab, cr);
322 static void set_retrans_timeout(struct control_retrans *cr, struct neighbor *nb)
324 cr->timeout = jiffies + usecs_to_jiffies(100000 +
325 ((__u32) atomic_read(&(nb->latency))) * 2 +
326 ((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
329 void retransmit_timerfunc(struct work_struct *work)
331 unsigned long iflags;
333 struct neighbor *nb = container_of(to_delayed_work(work),
334 struct neighbor, retrans_timer);
336 int nbstate;
337 int nbput = 0;
339 spin_lock_irqsave(&(nb->state_lock), iflags);
340 nbstate = nb->state;
341 spin_unlock_irqrestore(&(nb->state_lock), iflags);
343 while (1) {
344 struct control_retrans *cr = 0;
345 struct retransmit_matchparam rm;
347 spin_lock_irqsave(&(nb->retrans_lock), iflags);
349 if (list_empty(&(nb->retrans_list))) {
350 nb->retrans_timer_running = 0;
351 nbput = 1;
352 break;
355 cr = container_of(nb->retrans_list.next,
356 struct control_retrans, timeout_list);
358 BUG_ON(cr->nb != nb);
360 rm.seqno = cr->seqno;
361 rm.nb = nb;
363 list_del(&(cr->timeout_list));
365 if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
366 spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
368 htable_delete(&retransmits, rm_to_key(&rm), &rm,
369 free_control_retrans);
370 kref_put(&(cr->ref), free_control_retrans);
371 continue;
374 if (time_after(cr->timeout, jiffies)) {
375 list_add(&(cr->timeout_list), &(nb->retrans_list));
376 schedule_delayed_work(&(nb->retrans_timer),
377 cr->timeout - jiffies);
378 break;
381 if (unlikely(htable_delete(&retransmits, rm_to_key(&rm), &rm,
382 free_control_retrans)))
383 BUG();
385 spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
387 while (list_empty(&(cr->msgs)) == 0) {
388 struct control_msg_out *cm = container_of(cr->msgs.next,
389 struct control_msg_out, lh);
390 list_del(&(cm->lh));
391 add_control_msg(cm, 1);
394 kref_put(&(cr->ref), free_control_retrans);
397 spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
399 if (nbput)
400 kref_put(&(nb->ref), neighbor_free);
403 static void schedule_retransmit(struct control_retrans *cr, struct neighbor *nb)
405 unsigned long iflags;
407 struct retransmit_matchparam rm;
408 int first;
410 rm.seqno = cr->seqno;
411 rm.nb = nb;
413 set_retrans_timeout(cr, nb);
415 spin_lock_irqsave(&(nb->retrans_lock), iflags);
416 htable_insert(&retransmits, (char *) cr, rm_to_key(&rm));
417 first = list_empty(&(nb->retrans_list));
418 list_add_tail(&(cr->timeout_list), &(nb->retrans_list));
420 if (first && nb->retrans_timer_running == 0) {
421 schedule_delayed_work(&(nb->retrans_timer),
422 cr->timeout - jiffies);
423 nb->retrans_timer_running = 1;
424 kref_get(&(nb->ref));
427 spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
430 void kern_ack_rcvd(struct neighbor *nb, __u32 seqno)
432 unsigned long iflags;
434 struct control_retrans *cr = 0;
435 struct retransmit_matchparam rm;
437 rm.seqno = seqno;
438 rm.nb = nb;
440 spin_lock_irqsave(&(nb->retrans_lock), iflags);
442 cr = (struct control_retrans *) htable_get(&retransmits, rm_to_key(&rm),
443 &rm);
445 if (cr == 0) {
446 printk(KERN_ERR "bogus/duplicate ack received");
447 goto out;
450 if (unlikely(htable_delete(&retransmits, rm_to_key(&rm), &rm,
451 free_control_retrans)))
452 BUG();
454 BUG_ON(cr->nb != nb);
456 list_del(&(cr->timeout_list));
458 out:
459 spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
461 if (cr != 0) {
462 kref_put(&(cr->ref), free_control_retrans); /* htable_get */
463 kref_put(&(cr->ref), free_control_retrans); /* list */
467 static void padding(struct sk_buff *skb, int length)
469 char *dst;
470 if (length <= 0)
471 return;
472 dst = skb_put(skb, length);
473 BUG_ON(dst == 0);
474 memset(dst, KP_PADDING, length);
477 static int add_ack(struct sk_buff *skb, struct control_retrans *cr,
478 struct control_msg_out *cm, int spaceleft)
480 char *dst;
482 if (unlikely(spaceleft < 5))
483 return 0;
485 dst = skb_put(skb, 5);
486 BUG_ON(dst == 0);
488 dst[0] = KP_ACK;
489 put_u32(dst + 1, cm->msg.ack.seqno, 1);
491 atomic_dec(&(cm->nb->ucmcnt));
492 free_control_msg(cm);
494 return 5;
497 static int add_ack_conn(struct sk_buff *skb, struct control_retrans *cr,
498 struct control_msg_out *cm, int spaceleft)
500 char *dst;
501 int offset = 0;
503 if (unlikely(spaceleft < cm->length))
504 return 0;
506 dst = skb_put(skb, cm->length);
507 BUG_ON(dst == 0);
509 dst[offset] = KP_ACK_CONN;
510 offset++;
511 put_u32(dst + offset, cm->msg.ack_conn.conn_id, 1);
512 offset += 4;
513 dst[offset] = cm->msg.ack_conn.flags;
514 offset++;
516 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
517 put_u32(dst + offset, cm->msg.ack_conn.seqno, 1);
518 offset += 4;
520 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
521 BUG_ON(cm->msg.ack_conn.rconn == 0);
522 dst[offset] = get_window(cm->msg.ack_conn.rconn,
523 cm->nb);
524 offset++;
528 if (ooolen(cm->msg.ack_conn.flags) != 0) {
529 put_u32(dst + offset, cm->msg.ack_conn.seqno_ooo, 1);
530 offset += 4;
531 if (ooolen(cm->msg.ack_conn.flags) == 1) {
532 BUG_ON(cm->msg.ack_conn.length > 255);
533 dst[offset] = cm->msg.ack_conn.length;
534 offset += 1;
535 } else if (ooolen(cm->msg.ack_conn.flags) == 2) {
536 BUG_ON(cm->msg.ack_conn.length <= 255);
537 BUG_ON(cm->msg.ack_conn.length > 65535);
538 put_u16(dst + offset, cm->msg.ack_conn.length, 1);
539 offset += 2;
540 } else if (ooolen(cm->msg.ack_conn.flags) == 4) {
541 BUG_ON(cm->msg.ack_conn.length <= 65535);
542 put_u32(dst + offset, cm->msg.ack_conn.length, 1);
543 offset += 4;
544 } else {
545 BUG();
549 if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_CREDITS) != 0) {
550 __u16 value = cm->msg.ack_conn.decaytime + (
551 cm->msg.ack_conn.decaytime_seqno << 10);
553 BUG_ON(cm->msg.ack_conn.decaytime >= 1024);
554 BUG_ON(cm->msg.ack_conn.decaytime_seqno >= 64);
556 put_u16(dst + offset, value, 1);
557 offset += 2;
560 list_add_tail(&(cm->lh), &(cr->msgs));
562 BUG_ON(offset != cm->length);
563 return offset;
566 static int add_ping(struct sk_buff *skb, __u32 cookie,
567 int spaceleft)
569 char *dst;
571 if (unlikely(spaceleft < 5))
572 return 0;
574 dst = skb_put(skb, 5);
575 BUG_ON(dst == 0);
577 dst[0] = KP_PING;
578 put_u32(dst + 1, cookie, 0);
580 return 5;
583 static int add_pong(struct sk_buff *skb, struct control_retrans *cr,
584 struct control_msg_out *cm, int spaceleft)
586 char *dst;
588 if (unlikely(spaceleft < 9))
589 return 0;
591 if (cm->msg.pong.type == MSGTYPE_PONG_TIMEENQUEUED) {
592 __s64 now = ktime_to_ns(ktime_get());
593 __s64 enq = ktime_to_ns(cm->msg.pong.delaycomp.time_enqueued);
594 __s64 respdelay = (now - enq + 500) / 1000;
595 if (unlikely(respdelay >= (1LL << 32)))
596 respdelay = (1LL << 32) - 1;
597 cm->msg.pong.type = MSGTYPE_PONG_RESPDELAY;
598 cm->msg.pong.delaycomp.respdelay = (__u32) respdelay;
601 BUG_ON(cm->msg.pong.type != MSGTYPE_PONG_RESPDELAY);
603 dst = skb_put(skb, 9);
604 BUG_ON(dst == 0);
606 dst[0] = KP_PONG;
607 put_u32(dst + 1, cm->msg.pong.cookie, 0);
608 put_u32(dst + 5, cm->msg.pong.delaycomp.respdelay, 1);
611 atomic_dec(&(cm->nb->ucmcnt));
612 list_add_tail(&(cm->lh), &(cr->msgs));
614 return 9;
617 #warning todo targettype might have changed
618 static __u16 get_credits(struct conn *sconn)
620 __u16 ret;
621 mutex_lock(&(sconn->reversedir->rcv_lock));
622 BUG_ON(sconn->reversedir->targettype != TARGET_OUT);
624 BUG_ON(sconn->reversedir->target.out.decaytime_last >= 1024);
625 BUG_ON(sconn->reversedir->target.out.decaytime_seqno >= 64);
626 ret = sconn->reversedir->target.out.decaytime_last + (
627 sconn->reversedir->target.out.decaytime_seqno <<
628 10);
629 mutex_unlock(&(sconn->reversedir->rcv_lock));
631 return ret;
634 static int add_connect(struct sk_buff *skb, struct control_retrans *cr,
635 struct control_msg_out *cm, int spaceleft)
637 char *dst;
639 if (unlikely(spaceleft < 12))
640 return 0;
642 dst = skb_put(skb, 12);
643 BUG_ON(dst == 0);
645 dst[0] = KP_CONNECT;
646 put_u32(dst + 1, cm->msg.connect.conn_id, 1);
647 put_u32(dst + 5, cm->msg.connect.init_seqno, 1);
648 BUG_ON(cm->msg.connect.sconn == 0);
649 dst[9] = get_window(cm->msg.connect.sconn, cm->nb);
650 put_u16(dst + 10, get_credits(cm->msg.connect.sconn), 1);
652 list_add_tail(&(cm->lh), &(cr->msgs));
654 return 12;
657 static int add_connect_success(struct sk_buff *skb, struct control_retrans *cr,
658 struct control_msg_out *cm, int spaceleft)
660 char *dst;
662 if (unlikely(spaceleft < 16))
663 return 0;
665 dst = skb_put(skb, 16);
666 BUG_ON(dst == 0);
668 dst[0] = KP_CONNECT_SUCCESS;
669 put_u32(dst + 1, cm->msg.connect_success.rcvd_conn_id, 1);
670 put_u32(dst + 5, cm->msg.connect_success.gen_conn_id, 1);
671 put_u32(dst + 9, cm->msg.connect_success.init_seqno, 1);
672 BUG_ON(cm->msg.connect_success.rconn == 0);
673 dst[13] = get_window(cm->msg.connect_success.rconn, cm->nb);
674 put_u16(dst + 14, get_credits(cm->msg.connect_success.rconn), 1);
676 list_add_tail(&(cm->lh), &(cr->msgs));
678 return 16;
681 static int add_reset_conn(struct sk_buff *skb, struct control_retrans *cr,
682 struct control_msg_out *cm, int spaceleft)
684 char *dst;
686 if (unlikely(spaceleft < 5))
687 return 0;
689 dst = skb_put(skb, 5);
690 BUG_ON(dst == 0);
692 dst[0] = KP_RESET_CONN;
693 put_u32(dst + 1, cm->msg.reset_connidunknown.conn_id_reset, 1);
695 list_add_tail(&(cm->lh), &(cr->msgs));
697 return 5;
700 static int add_conndata(struct sk_buff *skb, struct control_retrans *cr,
701 struct control_msg_out *cm, int spaceleft,
702 struct control_msg_out **split_conndata, __u32 *sc_sendlen)
704 char *dst;
706 int totallen = cm->msg.conn_data.datalen + 11;
707 int putlen = min(totallen, spaceleft);
708 int dataputlen = putlen - 11;
710 BUG_ON(split_conndata == 0);
711 BUG_ON(sc_sendlen == 0);
713 if (dataputlen < 1 || (spaceleft < 25 && spaceleft < totallen))
714 return 0;
716 dst = skb_put(skb, putlen);
717 BUG_ON(dst == 0);
719 dst[0] = KP_CONN_DATA;
720 put_u32(dst + 1, cm->msg.conn_data.conn_id, 1);
721 put_u32(dst + 5, cm->msg.conn_data.seqno, 1);
722 put_u16(dst + 9, dataputlen, 1);
724 memcpy(dst + 11, cm->msg.conn_data.data, dataputlen);
726 if (cm->msg.conn_data.datalen == dataputlen) {
727 list_add_tail(&(cm->lh), &(cr->msgs));
728 } else {
729 *split_conndata = cm;
730 *sc_sendlen = dataputlen;
733 return putlen;
736 static int add_connid_unknown(struct sk_buff *skb, struct control_retrans *cr,
737 struct control_msg_out *cm, int spaceleft)
739 char *dst;
741 if (unlikely(spaceleft < 5))
742 return 0;
744 dst = skb_put(skb, 5);
745 BUG_ON(dst == 0);
747 dst[0] = KP_CONNID_UNKNOWN;
748 put_u32(dst + 1, cm->msg.reset_connidunknown.conn_id_unknown, 1);
750 list_add_tail(&(cm->lh), &(cr->msgs));
752 return 5;
755 static int add_set_max_cmsg_dly(struct sk_buff *skb, struct control_retrans *cr,
756 struct control_msg_out *cm, int spaceleft)
758 char *dst;
760 if (unlikely(spaceleft < 5))
761 return 0;
763 dst = skb_put(skb, 5);
764 BUG_ON(dst == 0);
766 dst[0] = KP_SET_MAX_CMSG_DELAY;
767 put_u32(dst + 1, cm->msg.set_max_cmsg_delay.delay, 1);
769 list_add_tail(&(cm->lh), &(cr->msgs));
771 return 5;
774 static int add_message(struct sk_buff *skb, struct control_retrans *cr,
775 struct control_msg_out *cm, int spaceleft,
776 struct control_msg_out **split_conndata, __u32 *sc_sendlen)
778 BUG_ON(split_conndata != 0 && *split_conndata != 0);
779 BUG_ON(sc_sendlen != 0 && *sc_sendlen != 0);
781 switch (cm->type) {
782 case MSGTYPE_ACK:
783 return add_ack(skb, cr, cm, spaceleft);
784 case MSGTYPE_ACK_CONN:
785 return add_ack_conn(skb, cr, cm, spaceleft);
786 case MSGTYPE_PONG:
787 return add_pong(skb, cr, cm, spaceleft);
788 case MSGTYPE_CONNECT:
789 return add_connect(skb, cr, cm, spaceleft);
790 case MSGTYPE_CONNECT_SUCCESS:
791 return add_connect_success(skb, cr, cm, spaceleft);
792 case MSGTYPE_RESET_CONN:
793 return add_reset_conn(skb, cr, cm, spaceleft);
794 case MSGTYPE_CONNDATA:
795 return add_conndata(skb, cr, cm, spaceleft, split_conndata,
796 sc_sendlen);
797 case MSGTYPE_CONNID_UNKNOWN:
798 return add_connid_unknown(skb, cr, cm, spaceleft);
799 case MSGTYPE_SET_MAX_CMSG_DELAY:
800 return add_set_max_cmsg_dly(skb, cr, cm, spaceleft);
801 default:
802 BUG();
804 BUG();
805 return 0;
808 static __u32 __send_messages(struct neighbor *nb, struct sk_buff *skb,
809 struct control_retrans *cr, int spaceleft, int urgentonly,
810 struct control_msg_out **split_conndata, __u32 *sc_sendlen)
812 __u32 length = 0;
813 mutex_lock(&(nb->cmsg_lock));
814 while (!list_empty(&(nb->ucontrol_msgs_out)) || (!urgentonly &&
815 !list_empty(&(nb->control_msgs_out)))) {
816 int rc;
818 int urgent = !list_empty(&(nb->ucontrol_msgs_out));
820 struct control_msg_out *cm;
822 if (urgent)
823 cm = container_of(nb->ucontrol_msgs_out.next,
824 struct control_msg_out, lh);
825 else
826 cm = container_of(nb->control_msgs_out.next,
827 struct control_msg_out, lh);
829 list_del(&(cm->lh));
830 if (urgent)
831 nb->ucmlength -= cm->length;
832 else
833 nb->cmlength -= cm->length;
834 mutex_unlock(&(nb->cmsg_lock));
835 rc = add_message(skb, cr, cm, spaceleft - length,
836 split_conndata, sc_sendlen);
837 mutex_lock(&(nb->cmsg_lock));
839 if (rc == 0) {
840 if (urgent) {
841 list_add(&(cm->lh), &(nb->ucontrol_msgs_out));
842 nb->ucmlength += cm->length;
843 } else {
844 list_add(&(cm->lh), &(nb->control_msgs_out));
845 nb->cmlength += cm->length;
847 break;
850 length += rc;
852 mutex_unlock(&(nb->cmsg_lock));
854 return length;
857 static int __send_messages_smcd(struct neighbor *nb, struct sk_buff *skb,
858 struct control_retrans *cr, int spaceleft)
860 struct control_msg_out *cm;
861 int rc;
863 cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);
865 if (unlikely(cm == 0))
866 return 0;
868 cm->type = MSGTYPE_SET_MAX_CMSG_DELAY;
869 cm->msg.set_max_cmsg_delay.delay = CMSG_INTERVAL_MS * 10;
870 cm->length = 5;
872 rc = add_message(skb, cr, cm, spaceleft, 0, 0);
874 nb->max_cmsg_delay_sent = 1;
876 return rc;
879 static int _send_messages(struct neighbor *nb, struct sk_buff *skb, int ping,
880 struct control_retrans *cr, int spaceleft, int urgentonly)
882 int rc;
883 int length = 0;
884 __u32 pingcookie = 0;
885 unsigned long last_ping_time;
886 struct control_msg_out *split_conndata = 0;
887 __u32 sc_sendlen = 0;
889 mutex_lock(&(nb->cmsg_lock));
891 if (ping != 0) {
892 int rc;
893 pingcookie = add_ping_req(nb, &last_ping_time);
894 rc = add_ping(skb, pingcookie, spaceleft - length);
895 BUG_ON(rc == 0);
896 length += rc;
899 if (likely(urgentonly == 0) && unlikely(nb->max_cmsg_delay_sent == 0))
900 length += __send_messages_smcd(nb, skb, cr, spaceleft - length);
902 mutex_unlock(&(nb->cmsg_lock));
904 length += __send_messages(nb, skb, cr, spaceleft - length, urgentonly,
905 &split_conndata, &sc_sendlen);
907 if (unlikely(length > spaceleft))
908 printk(KERN_ERR "error cor/kpacket_gen: length > spaceleft!?");
910 padding(skb, spaceleft - length);
912 rc = dev_queue_xmit(skb);
914 if (rc != 0) {
915 unadd_ping_req(nb, pingcookie, last_ping_time);
917 while (list_empty(&(cr->msgs)) == 0) {
918 struct control_msg_out *cm = container_of(cr->msgs.prev,
919 struct control_msg_out, lh);
920 list_del(&(cm->lh));
921 add_control_msg(cm, 1);
924 if (split_conndata != 0) {
925 add_control_msg(split_conndata, 1);
928 kref_put(&(cr->ref), free_control_retrans);
929 } else {
930 struct list_head *curr = cr->msgs.next;
932 while(curr != &(cr->msgs)) {
933 struct control_msg_out *cm = container_of(curr,
934 struct control_msg_out, lh);
936 curr = curr->next;
938 if (cm->type == MSGTYPE_CONNDATA) {
939 list_del(&(cm->lh));
940 kfree(cm->msg.conn_data.data_orig);
941 free_control_msg(cm);
945 if (split_conndata != 0) {
946 BUG_ON(sc_sendlen == 0);
947 BUG_ON(sc_sendlen >=
948 split_conndata->msg.conn_data.datalen);
950 split_conndata->msg.conn_data.data += sc_sendlen;
951 split_conndata->msg.conn_data.datalen -= sc_sendlen;
953 send_conndata(split_conndata,
954 split_conndata->msg.conn_data.conn_id,
955 split_conndata->msg.conn_data.seqno,
956 split_conndata->msg.conn_data.data_orig,
957 split_conndata->msg.conn_data.data,
958 split_conndata->msg.conn_data.datalen);
962 if (list_empty(&(cr->msgs)))
963 kref_put(&(cr->ref), free_control_retrans);
964 else
965 schedule_retransmit(cr, nb);
968 return rc;
971 static __u32 get_total_messages_length(struct neighbor *nb, int ping,
972 int urgentonly)
974 __u32 length = nb->ucmlength;
976 if (likely(urgentonly == 0)) {
977 length += nb->cmlength;
979 if (unlikely(nb->max_cmsg_delay_sent == 0))
980 length += 5;
982 if (ping == 2 || (length > 0 && ping != 0))
983 length += 5;
985 return length;
988 static int reset_timeouted_conn_needed(struct neighbor *nb, struct conn *src_in)
990 if (unlikely(unlikely(src_in->sourcetype != SOURCE_IN) ||
991 unlikely(src_in->source.in.nb != nb) ||
992 unlikely(atomic_read(&(src_in->isreset)) != 0)))
993 return 0;
994 else if (likely(time_after(src_in->source.in.jiffies_last_act +
995 CONN_ACTIVITY_UPDATEINTERVAL_SEC * HZ +
996 CONN_INACTIVITY_TIMEOUT_SEC * HZ, jiffies)))
997 return 0;
999 return 1;
1002 static void reset_timeouted_conns(struct neighbor *nb)
1004 int i;
1005 for (i=0;i<10000;i++) {
1006 unsigned long iflags;
1007 struct conn *src_in;
1009 int resetrc = 1;
1010 int rc = 0;
1012 spin_lock_irqsave(&(nb->conn_list_lock), iflags);
1014 if (list_empty(&(nb->rcv_conn_list))) {
1015 spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
1016 break;
1019 src_in = container_of(nb->rcv_conn_list.next, struct conn,
1020 source.in.nb_list);
1021 kref_get(&(src_in->ref));
1023 spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
1026 mutex_lock(&(src_in->rcv_lock));
1028 resetrc = reset_timeouted_conn_needed(nb, src_in);
1029 if (likely(resetrc == 0))
1030 goto put;
1032 rc = send_reset_conn(nb, src_in->reversedir->target.out.conn_id,
1033 src_in->source.in.conn_id, 1);
1034 if (unlikely(rc != 0))
1035 goto put;
1037 atomic_cmpxchg(&(src_in->reversedir->isreset), 0, 1);
1038 mutex_unlock(&(src_in->rcv_lock));
1039 reset_conn(src_in);
1041 if (0) {
1042 put:
1043 mutex_unlock(&(src_in->rcv_lock));
1045 kref_put(&(src_in->ref), free_conn);
1047 if (likely(resetrc == 0) || rc != 0)
1048 break;
1052 int send_messages(struct neighbor *nb, int resume)
1054 int i;
1055 int rc = 0;
1056 int ping;
1057 int targetmss = mss(nb);
1059 int nbstate = get_neigh_state(nb);
1060 int urgentonly = (nbstate != NEIGHBOR_STATE_ACTIVE);
1062 if (likely(urgentonly == 0))
1063 reset_timeouted_conns(nb);
1065 mutex_lock(&(nb->send_cmsg_lock));
1066 mutex_lock(&(nb->cmsg_lock));
1068 ping = time_to_send_ping(nb);
1070 for (i=0;1;i++) {
1071 __u32 length;
1073 __u32 seqno;
1074 struct sk_buff *skb;
1075 struct control_retrans *cr;
1077 BUG_ON(list_empty(&(nb->control_msgs_out)) &&
1078 (nb->cmlength != 0));
1079 BUG_ON((list_empty(&(nb->control_msgs_out)) == 0) &&
1080 (nb->cmlength == 0));
1081 BUG_ON(list_empty(&(nb->ucontrol_msgs_out)) &&
1082 (nb->ucmlength != 0));
1083 BUG_ON((list_empty(&(nb->ucontrol_msgs_out)) == 0) &&
1084 (nb->ucmlength == 0));
1085 BUG_ON(nb->cmlength < 0);
1086 BUG_ON(nb->ucmlength < 0);
1088 length = get_total_messages_length(nb, ping, urgentonly);
1090 if (length == 0)
1091 break;
1093 if (length < targetmss && i > 0)
1094 break;
1096 seqno = atomic_add_return(1, &(nb->kpacket_seqno));
1098 if (length > targetmss)
1099 length = targetmss;
1101 mutex_unlock(&(nb->cmsg_lock));
1102 skb = create_packet(nb, length, GFP_KERNEL, 0, seqno);
1103 if (unlikely(skb == 0)) {
1104 printk(KERN_ERR "cor: send_messages: cannot allocate "
1105 "skb (out of memory?)");
1106 goto oom;
1109 cr = kmem_cache_alloc(controlretrans_slab, GFP_KERNEL);
1110 if (unlikely(cr == 0)) {
1111 kfree_skb(skb);
1112 printk(KERN_ERR "cor: send_messages: cannot allocate "
1113 "control_retrans (out of memory?)");
1114 goto oom;
1116 memset(cr, 0, sizeof(struct control_retrans));
1117 kref_init(&(cr->ref));
1118 cr->nb = nb;
1119 cr->seqno = seqno;
1120 INIT_LIST_HEAD(&(cr->msgs));
1122 rc = _send_messages(nb, skb, ping, cr, length, urgentonly);
1123 ping = 0;
1125 mutex_lock(&(nb->cmsg_lock));
1127 if (rc != 0)
1128 break;
1131 if (0) {
1132 oom:
1133 mutex_lock(&(nb->cmsg_lock));
1136 if (rc != 0) {
1137 if (resume == 0)
1138 qos_enqueue(nb->dev, &(nb->rb_kp), QOS_CALLER_KPACKET);
1139 } else {
1140 atomic_set(&(nb->cmsg_work_scheduled), 0);
1141 schedule_controlmsg_timer(nb);
1144 mutex_unlock(&(nb->cmsg_lock));
1145 mutex_unlock(&(nb->send_cmsg_lock));
1147 if (rc == 0)
1148 kref_put(&(nb->ref), neighbor_free);
1150 return rc;
1153 void controlmsg_workfunc(struct work_struct *work)
1155 struct neighbor *nb = container_of(work, struct neighbor, cmsg_work);
1156 send_messages(nb, 0);
1159 static void schedule_cmsg_work(struct neighbor *nb)
1161 if (atomic_cmpxchg(&(nb->cmsg_work_scheduled), 0, 1) == 0) {
1162 kref_get(&(nb->ref));
1163 atomic_cmpxchg(&(nb->cmsg_timer_running), 1, 2);
1164 schedule_work(&(nb->cmsg_work));
1168 void controlmsg_timerfunc(unsigned long arg)
1170 struct neighbor *nb = (struct neighbor *) arg;
1172 int oldval = atomic_xchg(&(nb->cmsg_timer_running), 0);
1174 BUG_ON(oldval == 0);
1176 if (likely(oldval == 1))
1177 schedule_cmsg_work(nb);
1178 kref_put(&(nb->ref), neighbor_free);
1181 static unsigned long get_cmsg_timeout(struct neighbor *nb, int nbstate)
1183 unsigned long timeout = get_next_ping_time(nb);
1185 if (likely(nbstate == NEIGHBOR_STATE_ACTIVE) &&
1186 list_empty(&(nb->control_msgs_out)) == 0) {
1187 struct control_msg_out *first = container_of(
1188 nb->control_msgs_out.next,
1189 struct control_msg_out, lh);
1190 if (time_before(first->timeout, jiffies +
1191 usecs_to_jiffies(nb->cmsg_interval)))
1192 timeout = jiffies;
1193 else if (time_before(first->timeout, timeout))
1194 timeout = first->timeout;
1197 if (list_empty(&(nb->ucontrol_msgs_out)) == 0) {
1198 struct control_msg_out *first = container_of(
1199 nb->ucontrol_msgs_out.next,
1200 struct control_msg_out, lh);
1201 if (time_before(first->timeout, jiffies +
1202 usecs_to_jiffies(nb->cmsg_interval)))
1203 timeout = jiffies;
1204 else if (time_before(first->timeout, timeout))
1205 timeout = first->timeout;
1208 return timeout;
1211 static int cmsg_full_packet(struct neighbor *nb, int nbstate)
1213 int ping = time_to_send_ping(nb);
1214 int urgentonly = (nbstate != NEIGHBOR_STATE_ACTIVE);
1215 __u32 len = get_total_messages_length(nb, ping, urgentonly);
1217 if (len == 0)
1218 return 0;
1219 if (len < mss(nb))
1220 return 0;
1222 return 1;
1225 void schedule_controlmsg_timer(struct neighbor *nb)
1227 unsigned long timeout;
1228 int state = get_neigh_state(nb);
1230 if (unlikely(state == NEIGHBOR_STATE_KILLED)) {
1231 atomic_cmpxchg(&(nb->cmsg_timer_running), 1, 2);
1232 return;
1235 if (unlikely(atomic_read(&(nb->cmsg_work_scheduled)) == 1))
1236 return;
1238 if (cmsg_full_packet(nb, state))
1239 goto now;
1241 timeout = get_cmsg_timeout(nb, state);
1243 if (time_before_eq(timeout, jiffies)) {
1244 now:
1245 schedule_cmsg_work(nb);
1246 } else {
1247 if (atomic_xchg(&(nb->cmsg_timer_running), 1) == 0)
1248 kref_get(&(nb->ref));
1249 mod_timer(&(nb->cmsg_timer), timeout);
1253 static void free_oldest_ucm(struct neighbor *nb)
1255 struct control_msg_out *cm = container_of(nb->ucontrol_msgs_out.next,
1256 struct control_msg_out, lh);
1258 BUG_ON(list_empty(&(nb->ucontrol_msgs_out)));
1259 BUG_ON(isurgent(cm) == 0);
1261 list_del(&(cm->lh));
1262 nb->ucmlength -= cm->length;
1263 atomic_dec(&(nb->ucmcnt));
1264 free_control_msg(cm);
1267 static void add_control_msg(struct control_msg_out *cm, int retrans)
1269 int nbstate;
1270 __u64 newinterval;
1271 unsigned long jiffies_tmp;
1273 BUG_ON(cm->nb == 0);
1275 nbstate = get_neigh_state(cm->nb);
1277 BUG_ON(cm == 0);
1278 BUG_ON(cm->lh.next != LIST_POISON1 || cm->lh.prev != LIST_POISON2);
1280 cm->timeout = jiffies + msecs_to_jiffies(CMSG_INTERVAL_MS);
1282 mutex_lock(&(cm->nb->cmsg_lock));
1284 if (isurgent(cm)) {
1285 long msgs;
1287 msgs = atomic_inc_return(&(cm->nb->ucmcnt));
1288 BUG_ON(msgs <= 0);
1290 if (unlikely(retrans)) {
1291 if (msgs > MAX_URGENT_CMSGS_PER_NEIGH_RETRANSALLOW ||
1292 msgs > MAX_URGENT_CMSGS_PER_NEIGH) {
1293 atomic_dec(&(cm->nb->ucmcnt));
1294 free_control_msg(cm);
1295 goto out;
1298 cm->nb->ucmlength += cm->length;
1299 list_add(&(cm->lh), &(cm->nb->ucontrol_msgs_out));
1300 } else {
1301 if (msgs > MAX_URGENT_CMSGS_PER_NEIGH) {
1302 free_oldest_ucm(cm->nb);
1305 cm->nb->ucmlength += cm->length;
1306 list_add_tail(&(cm->lh), &(cm->nb->ucontrol_msgs_out));
1308 } else {
1309 cm->nb->cmlength += cm->length;
1310 list_add_tail(&(cm->lh), &(cm->nb->control_msgs_out));
1313 jiffies_tmp = jiffies;
1314 newinterval = (((__u64) cm->nb->cmsg_interval) * 255 +
1315 jiffies_to_usecs(jiffies_tmp -
1316 cm->nb->jiffies_last_cmsg)) / 256;
1317 cm->nb->jiffies_last_cmsg = jiffies_tmp;
1318 if (unlikely(newinterval > (1LL << 32) - 1))
1319 cm->nb->cmsg_interval = (__u32) ((1LL << 32) - 1);
1320 else
1321 cm->nb->cmsg_interval = newinterval;
1323 schedule_controlmsg_timer(cm->nb);
1325 out:
1326 mutex_unlock(&(cm->nb->cmsg_lock));
1329 void send_pong(struct neighbor *nb, __u32 cookie)
1331 struct control_msg_out *cm = _alloc_control_msg(nb, 0, 1);
1333 if (unlikely(cm == 0))
1334 return;
1336 cm->nb = nb;
1337 cm->type = MSGTYPE_PONG;
1338 cm->msg.pong.cookie = cookie;
1339 cm->msg.pong.type = MSGTYPE_PONG_TIMEENQUEUED;
1340 cm->msg.pong.delaycomp.time_enqueued = ktime_get();
1341 cm->length = 9;
1342 add_control_msg(cm, 0);
/*
 * Queue a reset for a connection and register it in the unknown_connids
 * hash table so incoming packets for that conn id can be matched.
 * Returns 0 on success, 1 if no control message could be allocated.
 * lowprio selects the allocation priority only.
 */
int send_reset_conn(struct neighbor *nb, __u32 conn_id_reset,
		__u32 conn_id_unknown, int lowprio)
{
	unsigned long iflags;
	struct unknownconnid_matchparam ucm;
	struct control_msg_out *cm = alloc_control_msg(nb, lowprio ?
			ACM_PRIORITY_LOW : ACM_PRIORITY_HIGH);

	if (unlikely(cm == 0))
		return 1;

	cm->type = MSGTYPE_RESET_CONN;
	cm->msg.reset_connidunknown.conn_id_reset = conn_id_reset;
	cm->msg.reset_connidunknown.conn_id_unknown = conn_id_unknown;
	cm->length = 5;

	ucm.nb = nb;
	ucm.conn_id = conn_id_unknown;

	/* the (nb, conn_id) pair must not be registered yet; presumably the
	 * caller guarantees uniqueness -- confirm */
	spin_lock_irqsave(&unknown_connids_lock, iflags);
	BUG_ON(htable_get(&unknown_connids, ucm_to_key(&ucm), &ucm) != 0);
	htable_insert(&unknown_connids, (char *) cm, ucm_to_key(&ucm));
	spin_unlock_irqrestore(&unknown_connids_lock, iflags);

	add_control_msg(cm, 0);

	return 0;
}
1374 void send_ack(struct neighbor *nb, __u32 seqno)
1376 struct control_msg_out *cm = _alloc_control_msg(nb, 0, 1);
1378 if (unlikely(cm == 0))
1379 return;
1381 cm->nb = nb;
1382 cm->type = MSGTYPE_ACK;
1383 cm->msg.ack.seqno = seqno;
1384 cm->length = 5;
1385 add_control_msg(cm, 0);
1388 #warning todo conn naming/locking
1389 void send_ack_conn(struct control_msg_out *cm, struct conn *rconn,
1390 __u32 conn_id, __u32 seqno)
1392 cm->type = MSGTYPE_ACK_CONN;
1393 kref_get(&(rconn->ref));
1394 BUG_ON(rconn->sourcetype != SOURCE_IN);
1395 cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_SEQNO |
1396 KP_ACK_CONN_FLAGS_WINDOW;
1397 cm->msg.ack_conn.rconn = rconn;
1398 cm->msg.ack_conn.conn_id = conn_id;
1399 cm->msg.ack_conn.seqno = seqno;
1400 cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
1401 add_control_msg(cm, 0);
1404 void send_ack_conn_ooo(struct control_msg_out *cm, struct conn *rconn,
1405 __u32 conn_id, __u32 seqno_ooo, __u32 length)
1407 cm->type = MSGTYPE_ACK_CONN;
1408 kref_get(&(rconn->ref));
1409 BUG_ON(rconn->sourcetype != SOURCE_IN);
1410 cm->msg.ack_conn.flags = ooolen_to_flags(length);
1411 cm->msg.ack_conn.rconn = rconn;
1412 cm->msg.ack_conn.conn_id = conn_id;
1413 cm->msg.ack_conn.seqno_ooo = seqno_ooo;
1414 cm->msg.ack_conn.length = length;
1415 cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
1416 add_control_msg(cm, 0);
/*
 * Queue a decaytime update for an outgoing connection and advance the
 * local decaytime sequence number (mod 64).  Currently only forced
 * sends are implemented (see #warning); takes a reference on rconn,
 * which must be a TARGET_OUT connection.
 */
void send_decaytime(struct conn *rconn, int force, __u16 decaytime)
{
	struct control_msg_out *cm;

#warning todo unforced send
	if (force == 0)
		return;

	cm = alloc_control_msg(rconn->target.out.nb, ACM_PRIORITY_LOW);

	if (cm == 0)
		return;

	cm->type = MSGTYPE_ACK_CONN;
	kref_get(&(rconn->ref));
	BUG_ON(rconn->targettype != TARGET_OUT);
	/* NOTE(review): flag name says CREDITS but the fields set below are
	 * decaytime_seqno/decaytime -- presumably credits and decaytime
	 * share this flag/encoding; confirm against the packet format */
	cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_CREDITS;
	/* message is sent on the reverse direction of this conn */
	cm->msg.ack_conn.rconn = rconn->reversedir;
	cm->msg.ack_conn.conn_id = rconn->target.out.conn_id;
	cm->msg.ack_conn.decaytime_seqno = rconn->target.out.decaytime_seqno;
	cm->msg.ack_conn.decaytime = decaytime;

	cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
	add_control_msg(cm, 0);

	/* remember what was sent and block further sends until allowed */
	rconn->target.out.decaytime_last = decaytime;
	rconn->target.out.decaytime_seqno = (rconn->target.out.decaytime_seqno +
			1) % 64;
	rconn->target.out.decaytime_send_allowed = 0;
}
1450 void send_connect_success(struct control_msg_out *cm, __u32 rcvd_conn_id,
1451 __u32 gen_conn_id, __u32 init_seqno, struct conn *rconn)
1453 cm->type = MSGTYPE_CONNECT_SUCCESS;
1454 cm->msg.connect_success.rcvd_conn_id = rcvd_conn_id;
1455 cm->msg.connect_success.gen_conn_id = gen_conn_id;
1456 cm->msg.connect_success.init_seqno = init_seqno;
1457 kref_get(&(rconn->ref));
1458 cm->msg.connect_success.rconn = rconn;
1459 cm->length = 16;
1460 add_control_msg(cm, 0);
1463 void send_connect_nb(struct control_msg_out *cm, __u32 conn_id,
1464 __u32 init_seqno, struct conn *sconn)
1466 cm->type = MSGTYPE_CONNECT;
1467 cm->msg.connect.conn_id = conn_id;
1468 cm->msg.connect.init_seqno = init_seqno;
1469 kref_get(&(sconn->ref));
1470 BUG_ON(sconn->sourcetype != SOURCE_IN);
1471 cm->msg.connect.sconn = sconn;
1472 cm->length = 12;
1473 add_control_msg(cm, 0);
1476 void send_conndata(struct control_msg_out *cm, __u32 conn_id, __u32 seqno,
1477 char *data_orig, char *data, __u32 datalen)
1479 cm->type = MSGTYPE_CONNDATA;
1480 cm->msg.conn_data.conn_id = conn_id;
1481 cm->msg.conn_data.seqno = seqno;
1482 cm->msg.conn_data.data_orig = data_orig;
1483 cm->msg.conn_data.data = data;
1484 cm->msg.conn_data.datalen = datalen;
1485 cm->length = 11 + datalen;
1486 add_control_msg(cm, 0);
/*
 * Queue a "connection id unknown" notification for a neighbor.  The
 * message is registered in the unknown_connids hash table keyed by
 * (nb, conn_id); if an entry (a pending RESET_CONN or CONNID_UNKNOWN)
 * already exists, the new message is dropped instead of queued.
 */
void send_connid_unknown(struct neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;
	char *ret;
	struct unknownconnid_matchparam ucm;

	struct control_msg_out *cm = alloc_control_msg(nb, ACM_PRIORITY_HIGH);

	if (unlikely(cm == 0))
		return;

	cm->type = MSGTYPE_CONNID_UNKNOWN;
	cm->msg.reset_connidunknown.conn_id_unknown = conn_id;
	cm->length = 5;

	ucm.nb = nb;
	ucm.conn_id = conn_id;

	/* insert only if no message for this (nb, conn_id) is pending */
	spin_lock_irqsave(&unknown_connids_lock, iflags);
	ret = htable_get(&unknown_connids, ucm_to_key(&ucm), &ucm);
	if (ret == 0)
		htable_insert(&unknown_connids, (char *) cm, ucm_to_key(&ucm));
	spin_unlock_irqrestore(&unknown_connids_lock, iflags);

	if (ret != 0) {
		struct control_msg_out *cm2 = (struct control_msg_out *) ret;

		BUG_ON(cm2->type != MSGTYPE_RESET_CONN &&
				cm2->type != MSGTYPE_CONNID_UNKNOWN);

		/* drop the reference htable_get took on the existing entry */
		kref_put(&(cm2->ref), cmsg_kref_free);

		/* duplicate -- discard the new message */
		free_control_msg(cm);
	} else {
		add_control_msg(cm, 0);
	}
}
1528 static int matches_connretrans(void *htentry, void *searcheditem)
1530 struct control_retrans *cr = (struct control_retrans *) htentry;
1531 struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
1532 searcheditem;
1534 return rm->nb == cr->nb && rm->seqno == cr->seqno;
1537 static int matches_unknownconnid(void *htentry, void *searcheditem)
1539 struct control_msg_out *cm = (struct control_msg_out *) htentry;
1541 struct unknownconnid_matchparam *ucm =
1542 (struct unknownconnid_matchparam *)searcheditem;
1544 BUG_ON(cm->type != MSGTYPE_RESET_CONN &&
1545 cm->type != MSGTYPE_CONNID_UNKNOWN);
1547 return ucm->nb == cm->nb && ucm->conn_id ==
1548 cm->msg.reset_connidunknown.conn_id_unknown;
1551 void __init cor_kgen_init(void)
1553 controlmsg_slab = kmem_cache_create("cor_controlmsg",
1554 sizeof(struct control_msg_out), 8, 0, 0);
1555 controlretrans_slab = kmem_cache_create("cor_controlretransmsg",
1556 sizeof(struct control_retrans), 8, 0, 0);
1557 htable_init(&retransmits, matches_connretrans,
1558 offsetof(struct control_retrans, htab_entry),
1559 offsetof(struct control_retrans, ref));
1560 htable_init(&unknown_connids, matches_unknownconnid,
1561 offsetof(struct control_msg_out,
1562 msg.reset_connidunknown.htab_entry),
1563 offsetof(struct control_msg_out, ref));
/* module license tag; must stay "GPL" to access GPL-only kernel symbols */
MODULE_LICENSE("GPL");