/*
 * Connection oriented routing
 * Copyright (C) 2007-2009 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include "cor.h"
/**
 * Split packet data format:
 * announce proto version [4]
 *	is 0, may be increased if the format changes
 * packet version [4]
 *	starts with 0, increments every time the data field changes
 * total size [4]
 *	total data size of all merged packets
 * offset [4]
 *	used to determine the order when merging the split packets,
 *	unit is bytes
 * [data]
 * cumulative checksum [8] (not yet)
 *	chunk 1 contains the checksum of the data in chunk 1
 *	chunk 2 contains the checksum of the data in chunks 1+2
 *	...
 *
 * Data format of the announce packet "data" field:
 * min_announce_proto_version [4]
 * max_announce_proto_version [4]
 * min_cor_proto_version [4]
 * max_cor_proto_version [4]
 *	versions which are understood
 *
 * command [4]
 * commandlength [4]
 * commanddata [commandlength]
 */
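
/*
 * Illustrative sketch (not part of the original code, compiled out via
 * "#if 0"): reading the four big-endian header fields described above from
 * a contiguous buffer. The struct and helper names are made up for this
 * example; the real receive path (rcv_announce) uses pull_u32() on the skb
 * instead.
 */
#if 0
struct announce_split_hdr {
	__u32 proto_version;	/* announce proto version, currently 0 */
	__u32 packet_version;	/* increments when the data field changes */
	__u32 total_size;	/* total data size of all merged packets */
	__u32 offset;		/* byte offset of this chunk */
};

static void example_parse_split_hdr(char *buf, struct announce_split_hdr *hdr)
{
	/* all fields are stored big endian on the wire */
	hdr->proto_version = be32_to_cpu(*((__u32 *) (buf + 0)));
	hdr->packet_version = be32_to_cpu(*((__u32 *) (buf + 4)));
	hdr->total_size = be32_to_cpu(*((__u32 *) (buf + 8)));
	hdr->offset = be32_to_cpu(*((__u32 *) (buf + 12)));
}
#endif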
/* Commands */

#define NEIGHCMD_ADDADDR 1

/**
 * Parameter (in the order read by apply_announce_addaddr):
 * addrtypelen [2]
 * addrlen [2]
 * addrtype [addrtypelen]
 * addr [addrlen]
 */
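
/*
 * Illustrative sketch (not part of the original code, compiled out via
 * "#if 0"): encoding the NEIGHCMD_ADDADDR parameters above into a flat
 * buffer, mirroring what generate_announce() does further down. The helper
 * name is made up; put_u16() is the byte-order helper this file already
 * uses.
 */
#if 0
static __u32 example_encode_addaddr(char *buf, char *addrtype,
		__u16 addrtypelen, char *addr, __u16 addrlen)
{
	__u32 offset = 0;

	put_u16(buf + offset, addrtypelen, 1);	/* addrtypelen [2] */
	offset += 2;
	put_u16(buf + offset, addrlen, 1);	/* addrlen [2] */
	offset += 2;
	memcpy(buf + offset, addrtype, addrtypelen); /* addrtype */
	offset += addrtypelen;
	memcpy(buf + offset, addr, addrlen);	/* addr */
	offset += addrlen;

	return offset;	/* number of bytes written */
}
#endif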
#warning todo
/**
 * "I hear you" data format:
 * challenge [todo]
 */
DEFINE_MUTEX(neighbor_operation_lock);

char *addrtype = "id";
char *addr;
int addrlen;

LIST_HEAD(nb_list);
struct kmem_cache *nb_slab;

LIST_HEAD(announce_out_list);

struct notifier_block netdev_notify;

#define ADDRTYPE_UNKNOWN 0
#define ADDRTYPE_ID 1
static int get_addrtype(__u32 addrtypelen, char *addrtype)
{
	if (addrtypelen == 2 &&
			(addrtype[0] == 'i' || addrtype[0] == 'I') &&
			(addrtype[1] == 'd' || addrtype[1] == 'D'))
		return ADDRTYPE_ID;

	return ADDRTYPE_UNKNOWN;
}
static void neighbor_free(struct ref_counter *ref)
{
	struct neighbor *nb = container_of(ref, struct neighbor, refs);

	BUG_ON(nb->nb_list.next != 0 || nb->nb_list.prev != 0);

	if (nb->addr != 0)
		kfree(nb->addr);
	nb->addr = 0;

	if (nb->dev != 0)
		dev_put(nb->dev);
	nb->dev = 0;

	kmem_cache_free(nb_slab, nb);
}

static struct ref_counter_def neighbor_ref = {
	.free = neighbor_free
};
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);
	__u32 seqno;

	if (nb == 0)
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	ref_counter_init(&(nb->refs), &neighbor_ref);
	mutex_init(&(nb->cmsg_lock));
	nb->latency = 10;
	INIT_LIST_HEAD(&(nb->control_msgs_out));
	atomic_set(&(nb->ooo_packets), 0);
	/* initialise the kernel packet seqno with a random value */
	get_random_bytes((char *) &seqno, sizeof(seqno));
	atomic_set(&(nb->kpacket_seqno), seqno);
	mutex_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->snd_conn_list));
	spin_lock_init(&(nb->retrans_lock));
	skb_queue_head_init(&(nb->retrans_list));

	return nb;
}
struct neighbor *get_neigh_by_mac(struct sk_buff *skb)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0) {
			ret = curr;
			ref_counter_incr(&(ret->refs));
			goto out;
		}

		currlh = currlh->next;
	}

out:
	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
				addrlen) == 0) {
			ret = curr;
			ref_counter_incr(&(ret->refs));
			goto out;
		}

		currlh = currlh->next;
	}

out:
	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
__u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
{
	struct list_head *currlh;

	char *p_totalneighs = buf;
	char *p_response_rows = buf + 4;

	__u32 total = 0;
	__u32 cnt = 0;

	__u32 buf_offset = 8;

	BUG_ON(buf == 0);
	BUG_ON(buflen < 8);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (total != cnt)
			goto cont;

		/*
		 * all values are unsigned, so the check has to be written as
		 * "needed > available", not "available - needed < 0"
		 */
		if (unlikely(buf_offset + 6 + 2 + curr->addrlen > buflen))
			goto cont;

		put_u16(buf + buf_offset, 1, 1); /* numaddr */
		buf_offset += 2;
		put_u16(buf + buf_offset, 2, 1); /* addrtypelen */
		buf_offset += 2;
		put_u16(buf + buf_offset, curr->addrlen, 1); /* addrlen */
		buf_offset += 2;
		buf[buf_offset] = 'i'; /* addrtype */
		buf_offset += 1;
		buf[buf_offset] = 'd';
		buf_offset += 1;
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		total++;
		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	put_u32(p_totalneighs, total, 1);
	put_u32(p_response_rows, cnt, 1);

	return buf_offset;
}
static void add_neighbor(struct neighbor *nb)
{
	struct list_head *currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == nb->addrlen && memcmp(curr->addr, nb->addr,
				curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}

#warning todo refcnt
	list_add_tail(&(nb->nb_list), &nb_list);
	schedule_controlmsg_timerfunc(nb);
	setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
			(unsigned long) nb);

	if (0) {
already_present:
		kmem_cache_free(nb_slab, nb);
	}
}
static __u32 pull_u32(struct sk_buff *skb, int convbo)
{
	char *ptr = cor_pull_skb(skb, 4);

	__u32 ret = 0;

	BUG_ON(0 == ptr);

	((char *)&ret)[0] = ptr[0];
	((char *)&ret)[1] = ptr[1];
	((char *)&ret)[2] = ptr[2];
	((char *)&ret)[3] = ptr[3];

	if (convbo)
		return be32_to_cpu(ret);
	return ret;
}
static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	__u16 addrtypelen;
	char *addrtype;
	__u16 addrlen;
	char *addr;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (nb->addr != 0)
		return 0;

	if (len < 4)
		return 0;

	addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	if (len < 2)
		return 0;

	addrlen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	/*
	 * len is unsigned, so check before subtracting instead of
	 * subtracting and testing for "len < 0" afterwards
	 */
	if (len < ((__u32) addrtypelen) + addrlen)
		return 0;

	addrtype = cmddata;
	cmddata += addrtypelen;

	addr = cmddata;
	cmddata += addrlen;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	nb->addr = kmalloc(addrlen, GFP_KERNEL);
	if (nb->addr == 0)
		return 1;

	memcpy(nb->addr, addr, addrlen);
	nb->addrlen = addrlen;

	return 0;
}
static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	if (cmd == NEIGHCMD_ADDADDR) {
		apply_announce_addaddr(nb, cmd, len, cmddata);
	} else {
		/* ignore unknown cmds */
	}
}
static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	struct neighbor *nb = alloc_neighbor(GFP_KERNEL);

	if (nb == 0)
		return;

	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* check_announce_cmds() has validated the lengths already */
		BUG_ON(cmdlen > len);

		apply_announce_cmd(nb, cmd, cmdlen, msg);

		msg += cmdlen;
		len -= cmdlen;
	}

	BUG_ON(len != 0);

	memcpy(nb->mac, source_hw, MAX_ADDR_LEN);

	dev_hold(dev);
	nb->dev = dev;
	add_neighbor(nb);
}
static int check_announce_cmds(char *msg, __u32 len)
{
	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packet */
		if (cmdlen > len)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0)
		return 1;

	return 0;
}
static void parse_announce(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	__u32 min_announce_version;
	__u32 max_announce_version;
	__u32 min_cor_version;
	__u32 max_cor_version;

	if (len < 16)
		return;

	min_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	min_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;

	if (min_announce_version != 0)
		return;
	if (min_cor_version != 0)
		return;
	if (check_announce_cmds(msg, len))
		return;

	apply_announce_cmds(msg, len, dev, source_hw);
}
struct announce_in {
	/* lh has to be first */
	struct list_head lh;
	struct sk_buff_head skbs; /* sorted by offset */
	struct net_device *dev;
	char source_hw[MAX_ADDR_LEN];
	__u32 announce_proto_version;
	__u32 packet_version;
	__u32 total_size;
	__u32 received_size;
	__u64 last_received_packet;
};

LIST_HEAD(announce_list);

struct kmem_cache *announce_in_slab;
static void merge_announce(struct announce_in *ann)
{
	char *msg = kmalloc(ann->total_size, GFP_KERNEL);
	__u32 copy = 0;

	if (msg == 0) {
		/* try again when the next packet arrives */
		return;
	}

	while (copy != ann->total_size) {
		__u32 currcpy;
		struct sk_buff *skb;

		if (skb_queue_empty(&(ann->skbs))) {
			printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
					"empty while merging packets\n");
			goto free;
		}

		skb = skb_dequeue(&(ann->skbs));

		currcpy = skb->len;

		if (currcpy + copy > ann->total_size)
			goto free;

#warning todo overlapping skbs
		memcpy(msg + copy, skb->data, currcpy);
		copy += currcpy;
		kfree_skb(skb);
	}

	parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);

free:
	if (msg != 0)
		kfree(msg);

	dev_put(ann->dev);
	list_del(&(ann->lh));
	kmem_cache_free(announce_in_slab, ann);
}
static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
{
	struct skb_procstate *ps = skb_pstate(skb);

	__u32 offset = ps->funcstate.announce.offset;
	__u32 len = skb->len;

	__u32 curroffset = 0;
	__u32 prevoffset = 0;
	__u32 prevlen = 0;

	struct sk_buff *curr = ann->skbs.next;

	if (len + offset > ann->total_size) {
		/* invalid header */
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Try to find the right place to insert in the sorted list. This
	 * means to process the list until we find a skb which has a greater
	 * offset, so we can insert before it to keep the sort order. However,
	 * this is complicated by the fact that the new skb must not be
	 * inserted between 2 skbs if there is no data missing in between. So
	 * the loop has to keep running until there is either a gap to insert
	 * at or we see that this data has already been received.
	 */
	while ((void *) curr != (void *) &(ann->skbs)) {
		/* look at the list entry, not the newly received skb */
		struct skb_procstate *currps = skb_pstate(curr);

		curroffset = currps->funcstate.announce.offset;

		if (curroffset > offset && (prevoffset + prevlen) < curroffset)
			break;

		prevoffset = curroffset;
		prevlen = curr->len;
		curr = curr->next;

		if ((offset+len) <= (prevoffset+prevlen)) {
			/* we already have this data */
			kfree_skb(skb);
			return 0;
		}
	}

	/*
	 * Calculate how much data was really received, by subtracting
	 * the bytes we already have.
	 */
	if (unlikely(prevoffset + prevlen > offset)) {
		len -= (prevoffset + prevlen) - offset;
		offset = prevoffset + prevlen;
	}

	if (unlikely((void *) curr != (void *) &(ann->skbs) &&
			(offset + len) > curroffset))
		len = curroffset - offset;

	ann->received_size += len;
	BUG_ON(ann->received_size > ann->total_size);
	__skb_queue_before(&(ann->skbs), curr, skb);
	ann->last_received_packet = get_jiffies_64();

	if (ann->received_size == ann->total_size)
		merge_announce(ann);
	else if (ann->skbs.qlen >= 16)
		return 1;

	return 0;
}
void rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct announce_in *curr = 0;
	struct announce_in *leastactive = 0;
	__u32 list_size = 0;

	__u32 announce_proto_version = pull_u32(skb, 1);
	__u32 packet_version = pull_u32(skb, 1);
	__u32 total_size = pull_u32(skb, 1);

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	ps->funcstate.announce.offset = pull_u32(skb, 1);

	mutex_lock(&(neighbor_operation_lock));

	/*
	 * checked after taking the mutex, because the discard path
	 * unlocks it
	 */
	if (total_size > 8192)
		goto discard;

	if (announce_proto_version != 0)
		goto discard;

	curr = (struct announce_in *) announce_list.next;

	while (((struct list_head *) curr) != &(announce_list)) {
		list_size++;
		if (curr->dev == skb->dev &&
				memcmp(curr->source_hw, source_hw, MAX_ADDR_LEN) == 0 &&
				curr->announce_proto_version == announce_proto_version &&
				curr->packet_version == packet_version &&
				curr->total_size == total_size)
			goto found;

		if (leastactive == 0 || curr->last_received_packet <
				leastactive->last_received_packet)
			leastactive = curr;

		curr = (struct announce_in *) curr->lh.next;
	}

	if (list_size >= 128) {
		BUG_ON(leastactive == 0);
		curr = leastactive;

		curr->last_received_packet = get_jiffies_64();

		while (!skb_queue_empty(&(curr->skbs))) {
			struct sk_buff *skb2 = skb_dequeue(&(curr->skbs));
			kfree_skb(skb2);
		}

		dev_put(curr->dev);
	} else {
		curr = kmem_cache_alloc(announce_in_slab,
				GFP_KERNEL);
		if (curr == 0)
			goto discard;

		skb_queue_head_init(&(curr->skbs));
		list_add_tail((struct list_head *) curr, &announce_list);
	}

	curr->packet_version = packet_version;
	curr->total_size = total_size;
	curr->received_size = 0;
	curr->announce_proto_version = announce_proto_version;
	curr->dev = skb->dev;
	dev_hold(curr->dev);
	memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);

found:
	if (_rcv_announce(skb, curr)) {
		list_del((struct list_head *) curr);
		dev_put(curr->dev);
		kmem_cache_free(announce_in_slab, curr);
	}

	if (0) {
discard:
		kfree_skb(skb);
	}

	mutex_unlock(&(neighbor_operation_lock));
}
struct announce {
	struct ref_counter refs;

	__u32 packet_version;
	char *announce_msg;
	__u32 announce_msg_len;
};

struct announce *last_announce;

struct announce_data {
	struct delayed_work announce_work;

	struct net_device *dev;

	struct announce *ann;

	struct list_head lh;

	__u32 curr_announce_msg_offset;
	__u64 scheduled_announce_timer;
};
static void _splitsend_announce(struct announce_data *ann)
{
	struct sk_buff *skb;
	__u32 packet_size = 256;
	__u32 remainingdata = ann->ann->announce_msg_len -
			ann->curr_announce_msg_offset;
	__u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
	__u32 overhead = 17 + headroom;
	char *header;
	char *ptr;

	if (remainingdata < packet_size)
		packet_size = remainingdata;

	skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
	if (unlikely(0 == skb))
		return;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, headroom);

	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->dev->broadcast, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	header = skb_put(skb, 17);
	if (unlikely(header == 0))
		goto out_err;

	header[0] = PACKET_TYPE_ANNOUNCE;

	put_u32(header + 1, 0, 1); /* announce proto version */
	put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
	put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
	put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */

	ptr = skb_put(skb, packet_size);
	if (unlikely(ptr == 0))
		goto out_err;

	memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset,
			packet_size);
	dev_queue_xmit(skb);

	ann->curr_announce_msg_offset += packet_size;

	if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
		ann->curr_announce_msg_offset = 0;

	if (0) {
out_err:
		if (skb != 0)
			kfree_skb(skb);
	}
}
static void splitsend_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	int reschedule = 0;

	mutex_lock(&(neighbor_operation_lock));

	if (ann->dev == 0)
		goto out;

	reschedule = 1;

	if (ann->ann == 0 && last_announce == 0)
		goto out;

	if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
		if (ann->ann != 0)
			ref_counter_decr(&(ann->ann->refs));
		ann->ann = last_announce;
		ref_counter_incr(&(ann->ann->refs));
	}

	_splitsend_announce(ann);
out:
	mutex_unlock(&(neighbor_operation_lock));

	if (reschedule) {
		int target_delay_ms = 500;
		int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
		/* named so it does not shadow the global jiffies counter */
		__u64 jiffies_now = get_jiffies_64();
		int delay;

		ann->scheduled_announce_timer += target_delay_jiffies;

		delay = ann->scheduled_announce_timer - jiffies_now;
		if (delay < 0)
			delay = 0;

		INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
		schedule_delayed_work(&(ann->announce_work), delay);
	}
}
static void announce_free(struct ref_counter *ref)
{
	struct announce *ann = container_of(ref, struct announce, refs);
	/* free the message buffer itself, not the address of the pointer */
	kfree(ann->announce_msg);
	kfree(ann);
}

static struct ref_counter_def announce_ref = {
	.free = announce_free
};
static struct announce_data *get_announce_by_netdev(struct net_device *dev)
{
	struct list_head *lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct announce_data *curr = container_of(lh,
				struct announce_data, lh);

		if (curr->dev == dev)
			return curr;

		/* advance, otherwise the loop never terminates */
		lh = lh->next;
	}

	return 0;
}
static void announce_sent_adddev(struct net_device *dev)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (ann == 0) {
		printk(KERN_ERR "cor: cannot allocate memory for sending "
				"announces\n");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	dev_hold(dev);
	ann->dev = dev;

	mutex_lock(&(neighbor_operation_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	mutex_unlock(&(neighbor_operation_lock));

	ann->scheduled_announce_timer = get_jiffies_64();
	INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
	schedule_delayed_work(&(ann->announce_work), 1);
}
static void announce_sent_rmdev(struct net_device *dev)
{
	struct announce_data *ann;

	mutex_lock(&(neighbor_operation_lock));

	ann = get_announce_by_netdev(dev);

	if (ann == 0)
		goto out;

	dev_put(ann->dev);
	ann->dev = 0;

out:
	mutex_unlock(&(neighbor_operation_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;

	switch (event) {
	case NETDEV_UP:
		announce_sent_adddev(dev);
		break;
	case NETDEV_DOWN:
		announce_sent_rmdev(dev);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}
static int set_announce(char *msg, __u32 len)
{
	struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);

	if (ann == 0) {
		kfree(msg);
		return 1;
	}

	memset(ann, 0, sizeof(struct announce));

	ann->announce_msg = msg;
	ann->announce_msg_len = len;

	ref_counter_init(&(ann->refs), &announce_ref);

	mutex_lock(&(neighbor_operation_lock));

	if (last_announce != 0) {
		ann->packet_version = last_announce->packet_version + 1;
		ref_counter_decr(&(last_announce->refs));
	}

	last_announce = ann;

	mutex_unlock(&(neighbor_operation_lock));

	return 0;
}
static int generate_announce(void)
{
	__u32 addrtypelen = strlen(addrtype);

	__u32 hdr_len = 16;
	__u32 cmd_hdr_len = 8;
	__u32 cmd_len = 2 + 2 + addrtypelen + addrlen;

	__u32 len = hdr_len + cmd_hdr_len + cmd_len;
	__u32 offset = 0;

	char *msg = kmalloc(len, GFP_KERNEL);
	if (msg == 0)
		return 1;

	put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
	offset += 4;

	put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
	offset += 4;
	put_u32(msg + offset, cmd_len, 1); /* command length */
	offset += 4;

	/* addrtypelen, addrlen */
	put_u16(msg + offset, addrtypelen, 1);
	offset += 2;
	put_u16(msg + offset, addrlen, 1);
	offset += 2;

	/* addrtype, addr */
	memcpy(msg + offset, addrtype, addrtypelen);
	offset += addrtypelen;
	memcpy(msg + offset, addr, addrlen);
	offset += addrlen;

	BUG_ON(offset != len);

	return set_announce(msg, len);
}
int __init cor_neighbor_init(void)
{
	addrlen = 16;

	addr = kmalloc(addrlen, GFP_KERNEL);
	if (addr == 0)
		goto error_free2;

	get_random_bytes(addr, addrlen);

	nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
			0, 0);
	announce_in_slab = kmem_cache_create("cor_announce_in",
			sizeof(struct announce_in), 8, 0, 0);
	/* slab creation can fail; do not continue with null caches */
	if (nb_slab == 0 || announce_in_slab == 0)
		goto error_free1;

	if (generate_announce())
		goto error_free1;

	memset(&netdev_notify, 0, sizeof(netdev_notify));
	netdev_notify.notifier_call = netdev_notify_func;
	register_netdevice_notifier(&netdev_notify);

	return 0;

error_free1:
	kfree(addr);

error_free2:
	return -ENOMEM;
}
MODULE_LICENSE("GPL");