ref_counter --> kref conversion
[cor_2_6_31.git] / net / cor / neighbor.c
blob e8481bb98b53927eaae9436565b3897bd90c47bc
1 /*
2 * Connection oriented routing
3 * Copyright (C) 2007-2009 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
21 #include "cor.h"
23 /**
24 * Split packet data format:
25 * announce proto version [4]
26 * is 0, may be increased if format changes
27 * packet version [4]
28 * starts with 0, increments every time the data field changes
29 * total size [4]
30 * total data size of all merged packets
31 * offset [4]
32 * used to determine the order when merging the split packet
33 * unit is bytes
34 * [data]
35 * cumulative checksum [8] (not yet)
36 * chunk 1 contains the checksum of the data in chunk 1
37 * chunk 2 contains the checksum of the data in chunk 1+2
38 * ...
40 * Data format of the announce packet "data" field:
41 * min_announce_proto_version [4]
42 * max_announce_proto_version [4]
43 * min_cor_proto_version [4]
44 * max_cor_proto_version [4]
45 * versions which are understood
47 * command [4]
48 * commandlength [4]
49 * commanddata [commandlength]
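The split-packet header fields above are written back to back in network byte order (see put_u32(..., 1) in _splitsend_announce() below, which also prepends a one-byte packet type), while the version fields and command stream make up the announce "data" field. As an illustrative sketch only, the chunk header could be summarized as a packed struct; the struct name is made up for this illustration, and the module itself reads and writes the fields with pull_u32()/put_u32() rather than casting:

/* Editor's sketch, not part of the module: on-wire header of one announce
 * chunk as described above. All fields are big endian. */
struct cor_announce_chunk_hdr {
	__be32 announce_proto_version;	/* currently always 0 */
	__be32 packet_version;		/* bumped whenever the data field changes */
	__be32 total_size;		/* size of the fully merged data field */
	__be32 offset;			/* byte offset of this chunk within the data */
} __attribute__((packed));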
52 /* Commands */
54 #define NEIGHCMD_ADDADDR 1
56 /**
57 * Parameter:
58 * addrtypelen [2]
59 * addrlen [2]
60 * addrtype [addrtypelen]
61 * addr [addrlen]
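Both length fields precede the two variable-length values on the wire; this is the order apply_announce_addaddr() parses and generate_announce() writes further down. A minimal sketch of serializing one such command body, reusing the put_u16() big-endian helper from this module (the function name fill_addaddr_body is made up for illustration):

/* Editor's sketch, not part of the module: write one NEIGHCMD_ADDADDR body
 * into buf and return the number of bytes written. buf must have room for
 * 4 + addrtypelen + addrlen bytes. */
static __u32 fill_addaddr_body(char *buf, const char *addrtype,
		__u16 addrtypelen, const char *addr, __u16 addrlen)
{
	__u32 offset = 0;

	put_u16(buf + offset, addrtypelen, 1);
	offset += 2;
	put_u16(buf + offset, addrlen, 1);
	offset += 2;
	memcpy(buf + offset, addrtype, addrtypelen);
	offset += addrtypelen;
	memcpy(buf + offset, addr, addrlen);
	offset += addrlen;

	return offset;
}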
64 #warning todo
65 /**
66 * "I hear you" data format:
67 * challenge [todo]
71 DEFINE_MUTEX(neighbor_operation_lock);
73 char *addrtype = "id";
74 char *addr;
75 int addrlen;
78 LIST_HEAD(nb_list);
79 struct kmem_cache *nb_slab;
81 LIST_HEAD(announce_out_list);
83 struct notifier_block netdev_notify;
86 #define ADDRTYPE_UNKNOWN 0
87 #define ADDRTYPE_ID 1
89 static int get_addrtype(__u32 addrtypelen, char *addrtype)
91 if (addrtypelen == 2 &&
92 (addrtype[0] == 'i' || addrtype[0] == 'I') &&
93 (addrtype[1] == 'd' || addrtype[1] == 'D'))
94 return ADDRTYPE_ID;
96 return ADDRTYPE_UNKNOWN;
99 void neighbor_free(struct kref *ref)
101 struct neighbor *nb = container_of(ref, struct neighbor, ref);
102 BUG_ON(nb->nb_list.next != 0 || nb->nb_list.prev != 0);
103 if (nb->addr != 0)
104 kfree(nb->addr);
105 nb->addr = 0;
106 if (nb->dev != 0)
107 dev_put(nb->dev);
108 nb->dev = 0;
109 kmem_cache_free(nb_slab, nb);
112 static struct neighbor *alloc_neighbor(gfp_t allocflags)
114 struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);
116 __u32 seqno;
118 if (nb == 0)
119 return 0;
121 memset(nb, 0, sizeof(struct neighbor));
123 kref_init(&(nb->ref));
124 mutex_init(&(nb->cmsg_lock));
125 INIT_LIST_HEAD(&(nb->control_msgs_out));
126 nb->last_ping_time = jiffies;
127 atomic_set(&(nb->ooo_packets), 0);
128 get_random_bytes((char *) &seqno, sizeof(seqno));
129 mutex_init(&(nb->pingcookie_lock));
130 atomic_set(&(nb->latency), 0);
131 mutex_init(&(nb->state_lock));
132 atomic_set(&(nb->kpacket_seqno), seqno);
133 mutex_init(&(nb->conn_list_lock));
134 INIT_LIST_HEAD(&(nb->rcv_conn_list));
135 INIT_LIST_HEAD(&(nb->snd_conn_list));
136 spin_lock_init(&(nb->retrans_lock));
138 skb_queue_head_init(&(nb->retrans_list));
140 return nb;
143 struct neighbor *get_neigh_by_mac(struct sk_buff *skb)
145 struct list_head *currlh;
146 struct neighbor *ret = 0;
149 char source_hw[MAX_ADDR_LEN];
150 memset(source_hw, 0, MAX_ADDR_LEN);
151 if (skb->dev->header_ops != 0 &&
152 skb->dev->header_ops->parse != 0)
153 skb->dev->header_ops->parse(skb, source_hw);
155 mutex_lock(&(neighbor_operation_lock));
157 currlh = nb_list.next;
159 while (currlh != &nb_list) {
160 struct neighbor *curr = container_of(currlh, struct neighbor,
161 nb_list);
163 if (memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0) {
164 ret = curr;
165 kref_get(&(ret->ref));
167 goto out;
170 currlh = currlh->next;
173 out:
174 mutex_unlock(&(neighbor_operation_lock));
176 return ret;
179 struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
180 __u16 addrlen, __u8 *addr)
182 struct list_head *currlh;
183 struct neighbor *ret = 0;
185 if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
186 return 0;
188 mutex_lock(&(neighbor_operation_lock));
190 currlh = nb_list.next;
192 while (currlh != &nb_list) {
193 struct neighbor *curr = container_of(currlh, struct neighbor,
194 nb_list);
196 if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
197 addrlen) == 0) {
198 ret = curr;
199 kref_get(&(ret->ref));
201 goto out;
204 currlh = currlh->next;
207 out:
208 mutex_unlock(&(neighbor_operation_lock));
210 return ret;
213 __u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
215 struct list_head *currlh;
217 char *p_totalneighs = buf;
218 char *p_response_rows = buf + 4;
220 int bufferfull = 0;
222 __u32 total = 0;
223 __u32 cnt = 0;
225 __u32 buf_offset = 8;
227 BUG_ON(buf == 0);
228 BUG_ON(buflen < 8);
230 mutex_lock(&(neighbor_operation_lock));
232 currlh = nb_list.next;
234 while (currlh != &nb_list) {
235 struct neighbor *curr = container_of(currlh, struct neighbor,
236 nb_list);
238 __u8 state;
239 /* get_neigh_state not used here because it would deadlock */
240 mutex_lock(&(curr->state_lock));
241 state = curr->state;
242 mutex_unlock(&(curr->state_lock));
244 if (state != NEIGHBOR_STATE_ACTIVE)
245 goto cont2;
247 if (total < offset)
248 goto cont;
250 if (unlikely(buf_offset + 6 + 2 + curr->addrlen > buflen))
251 bufferfull = 1;
253 if (bufferfull)
254 goto cont;
256 put_u16(buf + buf_offset, 1, 1);/* numaddr */
257 buf_offset += 2;
258 put_u16(buf + buf_offset, 2, 1);/* addrtypelen */
259 buf_offset += 2;
260 put_u16(buf + buf_offset, curr->addrlen, 1);/* addrlen */
261 buf_offset += 2;
262 buf[buf_offset] = 'i'; /* addrtype */
263 buf_offset += 1;
264 buf[buf_offset] = 'd';
265 buf_offset += 1;
266 memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
267 buf_offset += curr->addrlen;
269 BUG_ON(buf_offset > buflen);
271 cnt++;
273 cont:
274 total++;
275 cont2:
276 currlh = currlh->next;
279 mutex_unlock(&(neighbor_operation_lock));
281 put_u32(p_totalneighs, total, 1);
282 put_u32(p_response_rows, cnt, 1);
284 return buf_offset;
287 void set_last_routdtrip(struct neighbor *nb, unsigned long time)
289 BUG_ON(nb == 0);
291 mutex_lock(&(nb->state_lock));
293 if(likely(nb->state == NEIGHBOR_STATE_ACTIVE) && time_after(time,
294 nb->state_time.last_roundtrip))
295 nb->state_time.last_roundtrip = time;
297 mutex_unlock(&(nb->state_lock));
300 int get_neigh_state(struct neighbor *nb)
302 int ret;
303 int switchedtostalled = 0;
305 BUG_ON(nb == 0);
307 mutex_lock(&(nb->state_lock));
309 if (unlikely(likely(nb->state == NEIGHBOR_STATE_ACTIVE) && unlikely(
310 time_after(jiffies, nb->state_time.last_roundtrip +
311 msecs_to_jiffies(STALL_START_TIME_MS))))) {
312 nb->state = NEIGHBOR_STATE_STALLED;
313 switchedtostalled = 1;
316 ret = nb->state;
318 mutex_unlock(&(nb->state_lock));
320 if (switchedtostalled) {
321 printk(KERN_ERR "switched to stalled");
322 #warning todo reset conns
325 return ret;
328 static struct ping_cookie *find_cookie(struct neighbor *nb, __u32 cookie)
330 int i;
332 for(i=0;i<PING_COOKIES_PER_NEIGH;i++) {
333 if (nb->cookies[i].cookie == cookie)
334 return &(nb->cookies[i]);
336 return 0;
339 void ping_resp(struct neighbor *nb, __u32 cookie, __u32 respdelay)
341 struct ping_cookie *c;
342 int i;
344 __s64 newlatency;
346 mutex_lock(&(nb->pingcookie_lock));
348 c = find_cookie(nb, cookie);
350 if (c == 0)
351 goto out;
353 newlatency = ((((__s64) ((__u32)atomic_read(&(nb->latency)))) * 15 +
354 jiffies_to_usecs(jiffies - c->time) - respdelay) / 16);
355 if (unlikely(newlatency < 0))
356 newlatency = 0;
357 if (unlikely(newlatency > (((__s64)256)*256*256*256 - 1)))
358 newlatency = ((__s64)256)*256*256*256 - 1;
360 atomic_set(&(nb->latency), (__u32) newlatency);
362 c->cookie = 0;
363 nb->ping_intransit--;
365 for(i=0;i<PING_COOKIES_PER_NEIGH;i++) {
366 if (nb->cookies[i].cookie != 0 &&
367 time_before(nb->cookies[i].time, c->time)) {
368 nb->cookies[i].pongs++;
369 if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
370 nb->cookies[i].cookie = 0;
371 nb->cookies[i].pongs = 0;
372 nb->ping_intransit--;
377 mutex_lock(&(nb->state_lock));
379 if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
380 nb->state == NEIGHBOR_STATE_STALLED)) {
381 nb->ping_success++;
383 if (nb->state == NEIGHBOR_STATE_INITIAL) {
384 __u64 jiffies64 = get_jiffies_64();
385 if (nb->state_time.last_state_change == 0)
386 nb->state_time.last_state_change = jiffies64;
387 if (jiffies64 <= (nb->state_time.last_state_change +
388 msecs_to_jiffies(INITIAL_TIME_MS)))
389 goto out2;
392 if (nb->ping_success >= PING_SUCCESS_CNT) {
393 if (nb->state == NEIGHBOR_STATE_INITIAL)
394 printk(KERN_ERR "switched from initial to active");
395 else
396 printk(KERN_ERR "switched from stalled to active");
397 nb->state = NEIGHBOR_STATE_ACTIVE;
398 nb->ping_success = 0;
399 nb->state_time.last_roundtrip = jiffies;
403 out2:
404 mutex_unlock(&(nb->state_lock));
406 out:
407 mutex_unlock(&(nb->pingcookie_lock));
410 __u32 add_ping_req(struct neighbor *nb)
412 struct ping_cookie *c;
413 __u32 i;
415 __u32 cookie;
417 mutex_lock(&(nb->pingcookie_lock));
419 for (i=0;i<PING_COOKIES_PER_NEIGH;i++) {
420 if (nb->cookies[i].cookie == 0)
421 goto found;
424 get_random_bytes((char *) &i, sizeof(i));
425 i = (i % (PING_COOKIES_PER_NEIGH - PING_COOKIES_FIFO)) +
426 PING_COOKIES_FIFO;
428 found:
429 c = &(nb->cookies[i]);
430 c->time = jiffies;
431 c->pongs = 0;
432 nb->lastcookie++;
433 if (unlikely(nb->lastcookie == 0))
434 nb->lastcookie++;
435 c->cookie = nb->lastcookie;
437 nb->ping_intransit++;
439 cookie = c->cookie;
441 mutex_unlock(&(nb->pingcookie_lock));
443 return cookie;
448 * Additional check on top of the checks and timings already done in kpacket_gen.c.
449 * This is primarily to make sure that we do not invalidate other ping cookies
450 * which might still receive responses. It does this by requiring a certain
451 * minimum delay between pings, depending on how many pings are already in
452 * transit.
454 int time_to_send_ping(struct neighbor *nb)
456 int rc = 1;
458 mutex_lock(&(nb->pingcookie_lock));
459 if (nb->ping_intransit >= PING_COOKIES_NOTHROTTLE) {
460 __u32 mindelay = (((__u32)atomic_read(&(nb->latency)))/1000) <<
461 (nb->ping_intransit + 1 -
462 PING_COOKIES_NOTHROTTLE);
463 if (mindelay > PING_THROTTLE_LIMIT_MS)
464 mindelay = PING_THROTTLE_LIMIT_MS;
466 if (jiffies_to_msecs(jiffies - nb->last_ping_time) < mindelay)
467 rc = 0;
469 mutex_unlock(&(nb->pingcookie_lock));
471 return rc;
474 static void add_neighbor(struct neighbor *nb)
476 struct list_head *currlh = nb_list.next;
478 BUG_ON((nb->addr == 0) != (nb->addrlen == 0));
480 while (currlh != &nb_list) {
481 struct neighbor *curr = container_of(currlh, struct neighbor,
482 nb_list);
484 if (curr->addrlen == nb->addrlen && memcmp(curr->addr, nb->addr,
485 curr->addrlen) == 0)
486 goto already_present;
488 currlh = currlh->next;
490 #warning todo kref
491 list_add_tail(&(nb->nb_list), &nb_list);
492 schedule_controlmsg_timerfunc(nb);
493 setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
494 (unsigned long) nb);
496 if (0) {
497 already_present:
498 kmem_cache_free(nb_slab, nb);
502 static __u32 pull_u32(struct sk_buff *skb, int convbo)
504 char *ptr = cor_pull_skb(skb, 4);
506 __u32 ret = 0;
508 BUG_ON(0 == ptr);
510 ((char *)&ret)[0] = ptr[0];
511 ((char *)&ret)[1] = ptr[1];
512 ((char *)&ret)[2] = ptr[2];
513 ((char *)&ret)[3] = ptr[3];
515 if (convbo)
516 return be32_to_cpu(ret);
517 return ret;
520 static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
521 char *cmddata)
523 __u16 addrtypelen;
524 char *addrtype;
525 __u16 addrlen;
526 char *addr;
528 BUG_ON((nb->addr == 0) != (nb->addrlen == 0));
530 if (nb->addr != 0)
531 return 0;
533 if (len < 4)
534 return 0;
536 addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
537 cmddata += 2;
538 len -= 2;
540 if (len < 2)
541 return 0;
543 addrlen = be16_to_cpu(*((__u16 *) cmddata));
544 cmddata += 2;
545 len -= 2;
547 addrtype = cmddata;
548 cmddata += addrtypelen;
549 len -= addrtypelen;
551 addr = cmddata;
552 cmddata += addrlen;
553 len -= addrlen;
555 if (len < 0)
556 return 0;
558 if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
559 return 0;
561 nb->addr = kmalloc(addrlen, GFP_KERNEL);
562 if (nb->addr == 0)
563 return 1;
565 memcpy(nb->addr, addr, addrlen);
566 nb->addrlen = addrlen;
568 return 0;
571 static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
572 char *cmddata)
574 if (cmd == NEIGHCMD_ADDADDR) {
575 apply_announce_addaddr(nb, cmd, len, cmddata);
576 } else {
577 /* ignore unknown cmds */
581 static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
582 char *source_hw)
584 struct neighbor *nb = alloc_neighbor(GFP_KERNEL);
586 if (nb == 0)
587 return;
589 while (len >= 8) {
590 __u32 cmd;
591 __u32 cmdlen;
593 cmd = be32_to_cpu(*((__u32 *) msg));
594 msg += 4;
595 len -= 4;
596 cmdlen = be32_to_cpu(*((__u32 *) msg));
597 msg += 4;
598 len -= 4;
600 BUG_ON(cmdlen > len);
602 apply_announce_cmd(nb, cmd, cmdlen, msg);
604 msg += cmdlen;
605 len -= cmdlen;
608 BUG_ON(len != 0);
610 memcpy(nb->mac, source_hw, MAX_ADDR_LEN);
612 dev_hold(dev);
613 nb->dev = dev;
614 add_neighbor(nb);
618 static int check_announce_cmds(char *msg, __u32 len)
620 while (len >= 8) {
621 __u32 cmd;
622 __u32 cmdlen;
624 cmd = be32_to_cpu(*((__u32 *) msg));
625 msg += 4;
626 len -= 4;
627 cmdlen = be32_to_cpu(*((__u32 *) msg));
628 msg += 4;
629 len -= 4;
631 /* malformed packet */
632 if (cmdlen > len)
633 return 1;
635 msg += cmdlen;
636 len -= cmdlen;
639 if (len != 0)
640 return 1;
642 return 0;
645 static void parse_announce(char *msg, __u32 len, struct net_device *dev,
646 char *source_hw)
648 __u32 min_announce_version;
649 __u32 max_announce_version;
650 __u32 min_cor_version;
651 __u32 max_cor_version;
653 if (len < 16)
654 return;
656 min_announce_version = be32_to_cpu(*((__u32 *) msg));
657 msg += 4;
658 len -= 4;
659 max_announce_version = be32_to_cpu(*((__u32 *) msg));
660 msg += 4;
661 len -= 4;
662 min_cor_version = be32_to_cpu(*((__u32 *) msg));
663 msg += 4;
664 len -= 4;
665 max_cor_version = be32_to_cpu(*((__u32 *) msg));
666 msg += 4;
667 len -= 4;
669 if (min_announce_version != 0)
670 return;
671 if (min_cor_version != 0)
672 return;
673 if (check_announce_cmds(msg, len)) {
674 return;
676 apply_announce_cmds(msg, len, dev, source_hw);
679 struct announce_in {
680 /* lh has to be first */
681 struct list_head lh;
682 struct sk_buff_head skbs; /* sorted by offset */
683 struct net_device *dev;
684 char source_hw[MAX_ADDR_LEN];
685 __u32 announce_proto_version;
686 __u32 packet_version;
687 __u32 total_size;
688 __u32 received_size;
689 __u64 last_received_packet;
692 LIST_HEAD(announce_list);
694 struct kmem_cache *announce_in_slab;
696 static void merge_announce(struct announce_in *ann)
698 char *msg = kmalloc(ann->total_size, GFP_KERNEL);
699 __u32 copy = 0;
701 if (msg == 0) {
702 /* try again when next packet arrives */
703 return;
706 while (copy != ann->total_size) {
707 __u32 currcpy;
708 struct sk_buff *skb;
710 if (skb_queue_empty(&(ann->skbs))) {
711 printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
712 "empty while merging packets\n");
713 goto free;
716 skb = skb_dequeue(&(ann->skbs));
718 currcpy = skb->len;
720 if (currcpy + copy > ann->total_size)
721 goto free;
723 #warning todo overlapping skbs
724 memcpy(msg + copy, skb->data, currcpy);
725 copy += currcpy;
726 kfree_skb(skb);
729 parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);
731 free:
732 if (msg != 0)
733 kfree(msg);
735 dev_put(ann->dev);
736 list_del(&(ann->lh));
737 kmem_cache_free(announce_in_slab, ann);
740 static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
742 struct skb_procstate *ps = skb_pstate(skb);
744 __u32 offset = ps->funcstate.announce.offset;
745 __u32 len = skb->len;
747 __u32 curroffset = 0;
748 __u32 prevoffset = 0;
749 __u32 prevlen = 0;
751 struct sk_buff *curr = ann->skbs.next;
753 if (len + offset > ann->total_size) {
754 /* invalid header */
755 kfree_skb(skb);
756 return 0;
760 * Try to find the right place to insert into the sorted list. This
761 * means walking the list until we find an skb with a greater
762 * offset, so we can insert before it and keep the sort order. However,
763 * this is complicated by the fact that the new skb must not be inserted
764 * between 2 skbs if there is no data missing in between. So the loop
765 * has to keep running until there is either a gap to insert into or
766 * we see that this data has already been received.
768 while ((void *) curr != (void *) &(ann->skbs)) {
769 struct skb_procstate *currps = skb_pstate(curr);
771 curroffset = currps->funcstate.announce.offset;
773 if (curroffset > offset && (prevoffset + prevlen) < curroffset)
774 break;
776 prevoffset = curroffset;
777 prevlen = curr->len;
778 curr = curr->next;
780 if ((offset+len) <= (prevoffset+prevlen)) {
781 /* we already have this data */
782 kfree_skb(skb);
783 return 0;
788 * Calculate how much data was really received, by subtracting
789 * the bytes we already have.
791 if (unlikely(prevoffset + prevlen > offset)) {
792 len -= (prevoffset + prevlen) - offset;
793 offset = prevoffset + prevlen;
796 if (unlikely((void *) curr != (void *) &(ann->skbs) &&
797 (offset + len) > curroffset))
798 len = curroffset - offset;
800 ann->received_size += len;
801 BUG_ON(ann->received_size > ann->total_size);
802 __skb_queue_before(&(ann->skbs), curr, skb);
803 ann->last_received_packet = get_jiffies_64();
805 if (ann->received_size == ann->total_size)
806 merge_announce(ann);
807 else if (ann->skbs.qlen >= 16)
808 return 1;
810 return 0;
813 void rcv_announce(struct sk_buff *skb)
815 struct skb_procstate *ps = skb_pstate(skb);
816 struct announce_in *curr = 0;
817 struct announce_in *leastactive = 0;
818 __u32 list_size = 0;
820 __u32 announce_proto_version = pull_u32(skb, 1);
821 __u32 packet_version = pull_u32(skb, 1);
822 __u32 total_size = pull_u32(skb, 1);
824 char source_hw[MAX_ADDR_LEN];
825 memset(source_hw, 0, MAX_ADDR_LEN);
826 if (skb->dev->header_ops != 0 &&
827 skb->dev->header_ops->parse != 0)
828 skb->dev->header_ops->parse(skb, source_hw);
830 ps->funcstate.announce.offset = pull_u32(skb, 1);
832 mutex_lock(&(neighbor_operation_lock));
833 if (total_size > 8192)
835 goto discard;
837 if (announce_proto_version != 0)
838 goto discard;
840 curr = (struct announce_in *) announce_list.next;
842 while (((struct list_head *) curr) != &(announce_list)) {
843 list_size++;
844 if (curr->dev == skb->dev &&
845 memcmp(curr->source_hw, source_hw, MAX_ADDR_LEN) == 0 &&
846 curr->announce_proto_version == announce_proto_version &&
847 curr->packet_version == packet_version &&
848 curr->total_size == total_size)
849 goto found;
851 if (leastactive == 0 || curr->last_received_packet <
852 leastactive->last_received_packet)
853 leastactive = curr;
855 curr = (struct announce_in *) curr->lh.next;
858 if (list_size >= 128) {
859 BUG_ON(leastactive == 0);
860 curr = leastactive;
862 curr->last_received_packet = get_jiffies_64();
864 while (!skb_queue_empty(&(curr->skbs))) {
865 struct sk_buff *skb2 = skb_dequeue(&(curr->skbs));
866 kfree_skb(skb2);
869 dev_put(curr->dev);
870 } else {
871 curr = kmem_cache_alloc(announce_in_slab,
872 GFP_KERNEL);
873 if (curr == 0)
874 goto discard;
876 skb_queue_head_init(&(curr->skbs));
877 list_add_tail((struct list_head *) curr, &announce_list);
880 curr->packet_version = packet_version;
881 curr->total_size = total_size;
882 curr->received_size = 0;
883 curr->announce_proto_version = announce_proto_version;
884 curr->dev = skb->dev;
885 dev_hold(curr->dev);
886 memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);
888 found:
889 if (_rcv_announce(skb, curr)) {
890 list_del((struct list_head *) curr);
891 dev_put(curr->dev);
892 kmem_cache_free(announce_in_slab, curr);
895 if (0) {
896 discard:
897 kfree_skb(skb);
900 mutex_unlock(&(neighbor_operation_lock));
903 struct announce {
904 struct kref ref;
906 __u32 packet_version;
907 char *announce_msg;
908 __u32 announce_msg_len;
911 struct announce *last_announce;
913 struct announce_data {
914 struct delayed_work announce_work;
916 struct net_device *dev;
918 struct announce *ann;
920 struct list_head lh;
922 __u32 curr_announce_msg_offset;
923 __u64 scheduled_announce_timer;
926 static void _splitsend_announce(struct announce_data *ann)
928 struct sk_buff *skb;
929 __u32 packet_size = 256;
930 __u32 remainingdata = ann->ann->announce_msg_len -
931 ann->curr_announce_msg_offset;
932 __u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
933 __u32 overhead = 17 + headroom;
934 char *header;
935 char *ptr;
937 if (remainingdata < packet_size)
938 packet_size = remainingdata;
940 skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
941 if (unlikely(0 == skb))
942 return;
944 skb->protocol = htons(ETH_P_COR);
945 skb->dev = ann->dev;
946 skb_reserve(skb, headroom);
948 if(unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
949 ann->dev->broadcast, ann->dev->dev_addr, skb->len) < 0))
950 goto out_err;
952 skb_reset_network_header(skb);
954 header = skb_put(skb, 17);
955 if (unlikely(header == 0))
956 goto out_err;
958 header[0] = PACKET_TYPE_ANNOUNCE;
960 put_u32(header + 1, 0, 1); /* announce proto version */
961 put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
962 put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
963 put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */
965 ptr = skb_put(skb, packet_size);
966 if (unlikely(ptr == 0))
967 goto out_err;
969 memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset, packet_size);
970 dev_queue_xmit(skb);
972 ann->curr_announce_msg_offset += packet_size;
974 if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
975 ann->curr_announce_msg_offset = 0;
977 if (0) {
978 out_err:
979 if (skb != 0)
980 kfree_skb(skb);
984 static void announce_free(struct kref *ref)
986 struct announce *ann = container_of(ref, struct announce, ref);
987 kfree(ann->announce_msg);
988 kfree(ann);
991 static void splitsend_announce(struct work_struct *work)
993 struct announce_data *ann = container_of(to_delayed_work(work),
994 struct announce_data, announce_work);
995 int reschedule = 0;
997 mutex_lock(&(neighbor_operation_lock));
999 if (ann->dev == 0)
1000 goto out;
1002 reschedule = 1;
1004 if (ann->ann == 0 && last_announce == 0)
1005 goto out;
1007 if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
1008 if (ann->ann != 0)
1009 kref_put(&(ann->ann->ref), announce_free);
1010 ann->ann = last_announce;
1011 kref_get(&(ann->ann->ref));
1014 _splitsend_announce(ann);
1015 out:
1016 mutex_unlock(&(neighbor_operation_lock));
1018 if (reschedule) {
1019 int target_delay_ms = 500;
1020 int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
1021 __u64 jiffies64 = get_jiffies_64();
1022 int delay;
1024 ann->scheduled_announce_timer += target_delay_jiffies;
1026 delay = ann->scheduled_announce_timer - jiffies64;
1027 if (delay < 0)
1028 delay = 0;
1030 INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
1031 schedule_delayed_work(&(ann->announce_work), delay);
1035 static struct announce_data *get_announce_by_netdev(struct net_device *dev)
1037 struct list_head *lh = announce_out_list.next;
1039 while (lh != &announce_out_list) {
1040 struct announce_data *curr = (struct announce_data *)(
1041 ((char *) lh) -
1042 offsetof(struct announce_data, lh));
1044 if (curr->dev == dev)
1045 return curr;
lh = lh->next;
1048 return 0;
1051 static void announce_sent_adddev(struct net_device *dev)
1053 struct announce_data *ann;
1055 ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);
1057 if (ann == 0) {
1058 printk(KERN_ERR "cor cannot allocate memory for sending "
1059 "announces");
1060 return;
1063 memset(ann, 0, sizeof(struct announce_data));
1065 dev_hold(dev);
1066 ann->dev = dev;
1068 mutex_lock(&(neighbor_operation_lock));
1069 list_add_tail(&(ann->lh), &announce_out_list);
1070 mutex_unlock(&(neighbor_operation_lock));
1072 ann->scheduled_announce_timer = get_jiffies_64();
1073 INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
1074 schedule_delayed_work(&(ann->announce_work), 1);
1077 static void announce_sent_rmdev(struct net_device *dev)
1079 struct announce_data *ann;
1081 mutex_lock(&(neighbor_operation_lock));
1083 ann = get_announce_by_netdev(dev);
1085 if (ann == 0)
1086 goto out;
1088 dev_put(ann->dev);
1089 ann->dev = 0;
1091 out:
1092 mutex_unlock(&(neighbor_operation_lock));
1095 int netdev_notify_func(struct notifier_block *not, unsigned long event,
1096 void *ptr)
1098 struct net_device *dev = (struct net_device *) ptr;
1100 switch(event){
1101 case NETDEV_UP:
1102 announce_sent_adddev(dev);
1103 break;
1104 case NETDEV_DOWN:
1105 announce_sent_rmdev(dev);
1106 break;
1107 case NETDEV_REBOOT:
1108 case NETDEV_CHANGE:
1109 case NETDEV_REGISTER:
1110 case NETDEV_UNREGISTER:
1111 case NETDEV_CHANGEMTU:
1112 case NETDEV_CHANGEADDR:
1113 case NETDEV_GOING_DOWN:
1114 case NETDEV_CHANGENAME:
1115 case NETDEV_FEAT_CHANGE:
1116 case NETDEV_BONDING_FAILOVER:
1117 break;
1118 default:
1119 return 1;
1122 return 0;
1125 static int set_announce(char *msg, __u32 len)
1127 struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);
1129 if (ann == 0) {
1130 kfree(msg);
1131 return 1;
1134 memset(ann, 0, sizeof(struct announce));
1136 ann->announce_msg = msg;
1137 ann->announce_msg_len = len;
1139 kref_init(&(ann->ref));
1141 mutex_lock(&(neighbor_operation_lock));
1143 if (last_announce != 0) {
1144 ann->packet_version = last_announce->packet_version + 1;
1145 kref_put(&(last_announce->ref), announce_free);
1148 last_announce = ann;
1150 mutex_unlock(&(neighbor_operation_lock));
1152 return 0;
1155 static int generate_announce(void)
1157 __u32 addrtypelen = strlen(addrtype);
1159 __u32 hdr_len = 16;
1160 __u32 cmd_hdr_len = 8;
1161 __u32 cmd_len = 2 + 2 + addrtypelen + addrlen;
1163 __u32 len = hdr_len + cmd_hdr_len + cmd_len;
1164 __u32 offset = 0;
1166 char *msg = kmalloc(len, GFP_KERNEL);
1167 if (msg == 0)
1168 return 1;
1170 put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
1171 offset += 4;
1172 put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
1173 offset += 4;
1174 put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
1175 offset += 4;
1176 put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
1177 offset += 4;
1180 put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
1181 offset += 4;
1182 put_u32(msg + offset, cmd_len, 1); /* command length */
1183 offset += 4;
1185 /* addrtypelen, addrlen */
1186 put_u16(msg + offset, addrtypelen, 1);
1187 offset += 2;
1188 put_u16(msg + offset, addrlen, 1);
1189 offset += 2;
1191 /* addrtype, addr */
1192 memcpy(msg + offset, addrtype, addrtypelen);
1193 offset += addrtypelen;
1194 memcpy(msg + offset, addr, addrlen);
1195 offset += addrlen;
1197 BUG_ON(offset != len);
1199 return set_announce(msg, len);
1202 int __init cor_neighbor_init(void)
1204 addrlen = 16;
1206 addr = kmalloc(addrlen, GFP_KERNEL);
1207 if (addr == 0)
1208 goto error_free2;
1210 get_random_bytes(addr, addrlen);
1212 nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
1213 0, 0);
1214 announce_in_slab = kmem_cache_create("cor_announce_in",
1215 sizeof(struct announce_in), 8, 0, 0);
1217 if (generate_announce())
1218 goto error_free1;
1220 memset(&netdev_notify, 0, sizeof(netdev_notify));
1221 netdev_notify.notifier_call = netdev_notify_func;
1222 register_netdevice_notifier(&netdev_notify);
1224 return 0;
1226 error_free1:
1227 kfree(addr);
1229 error_free2:
1230 return -ENOMEM;
1233 MODULE_LICENSE("GPL");