limit of out of order packets and receive workqueue size
[cor_2_6_31.git] / net / cor / neighbor.c
/*
 * Connection oriented routing
 * Copyright (C) 2007-2009 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include "cor.h"
/**
 * Split packet data format:
 * announce proto version [4]
 *	is 0, may be increased if the format changes
 * packet version [4]
 *	starts with 0, increments every time the data field changes
 * total size [4]
 *	total data size of all merged packets
 * offset [4]
 *	used to determine the order when merging the split packets;
 *	unit is bytes
 * [data]
 * cumulative checksum [8] (not yet implemented)
 *	chunk 1 contains the checksum of the data in chunk 1
 *	chunk 2 contains the checksum of the data in chunks 1+2
 *	...
 *
 * Data format of the announce packet "data" field:
 * min_announce_proto_version [4]
 * max_announce_proto_version [4]
 * min_cor_proto_version [4]
 * max_cor_proto_version [4]
 *	versions which are understood
 *
 * command [4]
 * commandlength [4]
 * commanddata [commandlength]
 */
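/*
 * Illustrative sketch only (not used by the code in this file, which reads
 * and writes these fields with pull_u32()/put_u32() instead): the 17 byte
 * on-wire announce packet header described above, including the leading
 * packet type byte written by _splitsend_announce(). The struct name is
 * made up for illustration.
 */
#if 0
struct announce_packet_hdr_sketch {
	__u8 packet_type;		/* PACKET_TYPE_ANNOUNCE */
	__be32 announce_proto_version;	/* 0 in this format */
	__be32 packet_version;		/* increments when the data changes */
	__be32 total_size;		/* total size of the merged data */
	__be32 offset;			/* offset of this chunk, in bytes */
} __attribute__((packed));
#endif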
/* Commands */

#define NEIGHCMD_ADDADDR 1
/**
 * Parameter (in wire order, as written by generate_announce() and read by
 * apply_announce_addaddr(): both length fields come first):
 * addrtypelen [2]
 * addrlen [2]
 * addrtype [addrtypelen]
 * addr [addrlen]
 */
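/*
 * Illustrative sketch (not referenced by the code; the address bytes are
 * made up): a complete command block carrying NEIGHCMD_ADDADDR for the
 * "id" addrtype with a hypothetical 4 byte address, laid out the way
 * generate_announce() builds it:
 */
#if 0
static const char neighcmd_addaddr_example[] = {
	0x00, 0x00, 0x00, 0x01,	/* command: NEIGHCMD_ADDADDR */
	0x00, 0x00, 0x00, 0x0a,	/* commandlength: 2+2+2+4 = 10 */
	0x00, 0x02,		/* addrtypelen */
	0x00, 0x04,		/* addrlen */
	'i', 'd',		/* addrtype */
	0x01, 0x02, 0x03, 0x04	/* addr (example bytes) */
};
#endif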
#warning todo
/**
 * "I hear you" data format:
 * challenge [todo]
 */
DEFINE_MUTEX(neighbor_operation_lock);

char *addrtype = "id";
char *addr;
int addrlen;

LIST_HEAD(nb_list);
struct kmem_cache *nb_slab;

LIST_HEAD(announce_out_list);

struct notifier_block netdev_notify;

#define ADDRTYPE_UNKNOWN 0
#define ADDRTYPE_ID 1
static int get_addrtype(__u32 addrtypelen, char *addrtype)
{
	if (addrtypelen == 2 &&
			(addrtype[0] == 'i' || addrtype[0] == 'I') &&
			(addrtype[1] == 'd' || addrtype[1] == 'D'))
		return ADDRTYPE_ID;

	return ADDRTYPE_UNKNOWN;
}
static void neighbor_free(struct ref_counter *ref)
{
	struct neighbor *nb = container_of(ref, struct neighbor, refs);
	BUG_ON(nb->nb_list.next != 0 || nb->nb_list.prev != 0);
	if (nb->addr != 0)
		kfree(nb->addr);
	nb->addr = 0;
	if (nb->dev != 0)
		dev_put(nb->dev);
	nb->dev = 0;
	kmem_cache_free(nb_slab, nb);
}

static struct ref_counter_def neighbor_ref = {
	.free = neighbor_free
};
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);

	if (nb == 0)
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	ref_counter_init(&(nb->refs), &neighbor_ref);
	mutex_init(&(nb->cmsg_lock));
	/*struct control_msg_out *first_cm;
	struct control_msg_out *last_cm;
	unsigned long timedue;*/
	nb->latency = 10;
	INIT_LIST_HEAD(&(nb->control_msgs_out));
	atomic_set(&(nb->ooo_packets), 0);
	mutex_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->snd_conn_list));
	spin_lock_init(&(nb->retrans_lock));
	skb_queue_head_init(&(nb->retrans_list));

	return nb;
}
static void add_neighbor(struct neighbor *nb)
{
	struct list_head *currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == nb->addrlen && memcmp(curr->addr,
				nb->addr, curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}

#warning todo refcnt
	list_add_tail(&(nb->nb_list), &nb_list);
	schedule_controlmsg_timerfunc(nb);
	setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
			(unsigned long) nb);

	if (0) {
already_present:
		kmem_cache_free(nb_slab, nb);
	}
}
struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
				addrlen) == 0) {
#warning todo refcnt
			ret = curr;
			goto out;
		}

		currlh = currlh->next;
	}

out:
	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
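/*
 * Buffer layout produced by generate_neigh_list() (descriptive sketch,
 * reconstructed from the code below):
 *	total neighbors [4]
 *	rows in this response [4]
 *	per row: numaddr [2] (currently always 1), addrtypelen [2] (2),
 *		addrlen [2], addrtype [addrtypelen] ("id"), addr [addrlen]
 */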
__u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
{
	struct list_head *currlh;

	char *p_totalneighs = buf;
	char *p_response_rows = buf + 4;

	__u32 total = 0;
	__u32 cnt = 0;

	__u32 buf_offset = 8;

	BUG_ON(buf == 0);
	BUG_ON(buflen < 8);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (total != cnt)
			goto cont;

		/* 6 bytes of length fields + 2 bytes addrtype + the address
		 * must still fit; compare without unsigned underflow */
		if (unlikely(buf_offset + 6 + 2 + curr->addrlen > buflen))
			goto cont;

		put_u16(buf + buf_offset, 1, 1);/* numaddr */
		buf_offset += 2;
		put_u16(buf + buf_offset, 2, 1);/* addrtypelen */
		buf_offset += 2;
		put_u16(buf + buf_offset, curr->addrlen, 1);/* addrlen */
		buf_offset += 2;
		buf[buf_offset] = 'i';	/* addrtype */
		buf_offset += 1;
		buf[buf_offset] = 'd';
		buf_offset += 1;
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		total++;
		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	put_u32(p_totalneighs, total, 1);
	put_u32(p_response_rows, cnt, 1);

	return buf_offset;
}
static __u32 pull_u32(struct sk_buff *skb, int convbo)
{
	char *ptr = cor_pull_skb(skb, 4);

	__u32 ret = 0;

	BUG_ON(0 == ptr);

	((char *)&ret)[0] = ptr[0];
	((char *)&ret)[1] = ptr[1];
	((char *)&ret)[2] = ptr[2];
	((char *)&ret)[3] = ptr[3];

	if (convbo)
		return be32_to_cpu(ret);
	return ret;
}
static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	__u16 addrtypelen;
	char *addrtype;
	__u16 addrlen;
	char *addr;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (nb->addr != 0)
		return 0;

	if (len < 4)
		return 0;

	addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	if (len < 2)
		return 0;

	addrlen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	/* len is unsigned, so check before subtracting instead of testing
	 * for a negative result afterwards */
	if (len < ((__u32) addrtypelen) + addrlen)
		return 0;

	addrtype = cmddata;
	cmddata += addrtypelen;
	len -= addrtypelen;

	addr = cmddata;
	cmddata += addrlen;
	len -= addrlen;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	nb->addr = kmalloc(addrlen, GFP_KERNEL);
	if (nb->addr == 0)
		return 1;

	memcpy(nb->addr, addr, addrlen);
	nb->addrlen = addrlen;

	return 0;
}
static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	if (cmd == NEIGHCMD_ADDADDR) {
		apply_announce_addaddr(nb, cmd, len, cmddata);
	} else {
		/* ignore unknown cmds */
	}
}
static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	struct neighbor *nb = alloc_neighbor(GFP_KERNEL);

	if (nb == 0)
		return;

	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		BUG_ON(cmdlen > len);

		apply_announce_cmd(nb, cmd, cmdlen, msg);

		msg += cmdlen;
		len -= cmdlen;
	}

	BUG_ON(len != 0);

	dev_hold(dev);
	nb->dev = dev;
	add_neighbor(nb);
}
static int check_announce_cmds(char *msg, __u32 len)
{
	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packet */
		if (cmdlen > len)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0)
		return 1;

	return 0;
}
static void parse_announce(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	__u32 min_announce_version;
	__u32 max_announce_version;
	__u32 min_cor_version;
	__u32 max_cor_version;

	if (len < 16)
		return;

	min_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	min_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;

	if (min_announce_version != 0)
		return;
	if (min_cor_version != 0)
		return;
	if (check_announce_cmds(msg, len))
		return;

	apply_announce_cmds(msg, len, dev, source_hw);
}
struct announce_in {
	/* lh has to be first */
	struct list_head lh;
	struct sk_buff_head skbs; /* sorted by offset */
	struct net_device *dev;
	char source_hw[MAX_ADDR_LEN];
	__u32 announce_proto_version;
	__u32 packet_version;
	__u32 total_size;
	__u32 received_size;
	__u64 last_received_packet;
};

LIST_HEAD(announce_list);

struct kmem_cache *announce_in_slab;
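/*
 * Reassembly overview (descriptive sketch of the code below):
 * rcv_announce() looks up the announce_in matching (dev, source_hw, proto
 * version, packet version, total size), creating one if needed and
 * recycling the least recently active entry once 128 are pending.
 * _rcv_announce() inserts each skb into the offset-sorted queue, and
 * merge_announce() concatenates and parses the data once received_size
 * reaches total_size.
 */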
static void merge_announce(struct announce_in *ann)
{
	char *msg = kmalloc(ann->total_size, GFP_KERNEL);
	__u32 copy = 0;

	if (msg == 0) {
		/* try again when the next packet arrives */
		return;
	}

	while (copy != ann->total_size) {
		__u32 currcpy;
		struct sk_buff *skb;

		if (skb_queue_empty(&(ann->skbs))) {
			printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
					"empty while merging packets\n");
			goto free;
		}

		skb = skb_dequeue(&(ann->skbs));

		currcpy = skb->len;

		if (currcpy + copy > ann->total_size) {
			/* would overrun the merge buffer */
			kfree_skb(skb);
			goto free;
		}

#warning todo overlapping skbs
		memcpy(msg + copy, skb->data, currcpy);
		copy += currcpy;
		kfree_skb(skb);
	}

	parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);

free:
	if (msg != 0)
		kfree(msg);

	dev_put(ann->dev);
	list_del(&(ann->lh));
	kmem_cache_free(announce_in_slab, ann);
}
static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
{
	struct skb_procstate *ps = skb_pstate(skb);

	__u32 offset = ps->funcstate.announce.offset;
	__u32 len = skb->len;

	__u32 curroffset = 0;
	__u32 prevoffset = 0;
	__u32 prevlen = 0;

	struct sk_buff *curr = ann->skbs.next;

	if (len + offset > ann->total_size) {
		/* invalid header */
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Try to find the right place to insert in the sorted list. This
	 * means to process the list until we find a skb which has a greater
	 * offset, so we can insert before it to keep the sort order. However,
	 * this is complicated by the fact that the new skb must not be
	 * inserted between 2 skbs if there is no data missing in between. So
	 * the loop has to keep running until there is either a gap to insert
	 * into or we see that this data has already been received.
	 */
	while ((void *) curr != (void *) &(ann->skbs)) {
		struct skb_procstate *currps = skb_pstate(curr);

		curroffset = currps->funcstate.announce.offset;

		if (curroffset > offset && (prevoffset + prevlen) < curroffset)
			break;

		prevoffset = curroffset;
		prevlen = curr->len;
		curr = curr->next;

		if ((offset+len) <= (prevoffset+prevlen)) {
			/* we already have this data */
			kfree_skb(skb);
			return 0;
		}
	}

	/*
	 * Calculate how much data was really received, by subtracting
	 * the bytes we already have.
	 */
	if (unlikely(prevoffset + prevlen > offset)) {
		len -= (prevoffset + prevlen) - offset;
		offset = prevoffset + prevlen;
	}

	if (unlikely((void *) curr != (void *) &(ann->skbs) &&
			(offset + len) > curroffset))
		len = curroffset - offset;

	ann->received_size += len;
	BUG_ON(ann->received_size > ann->total_size);
	__skb_queue_before(&(ann->skbs), curr, skb);
	ann->last_received_packet = get_jiffies_64();

	if (ann->received_size == ann->total_size)
		merge_announce(ann);
	else if (ann->skbs.qlen >= 16)
		return 1;

	return 0;
}
void rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct announce_in *curr = 0;
	struct announce_in *leastactive = 0;
	__u32 list_size = 0;

	__u32 announce_proto_version = pull_u32(skb, 1);
	__u32 packet_version = pull_u32(skb, 1);
	__u32 total_size = pull_u32(skb, 1);

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	ps->funcstate.announce.offset = pull_u32(skb, 1);

	/* take the lock before the first "goto discard"; the discard path
	 * runs with the lock held and unlocks it at the end */
	mutex_lock(&(neighbor_operation_lock));

	if (total_size > 8192)
		goto discard;

	if (announce_proto_version != 0)
		goto discard;

	curr = (struct announce_in *) announce_list.next;

	while (((struct list_head *) curr) != &(announce_list)) {
		list_size++;
		if (curr->dev == skb->dev &&
				memcmp(curr->source_hw, source_hw,
						MAX_ADDR_LEN) == 0 &&
				curr->announce_proto_version ==
						announce_proto_version &&
				curr->packet_version == packet_version &&
				curr->total_size == total_size)
			goto found;

		if (leastactive == 0 || curr->last_received_packet <
				leastactive->last_received_packet)
			leastactive = curr;

		curr = (struct announce_in *) curr->lh.next;
	}

	if (list_size >= 128) {
		BUG_ON(leastactive == 0);
		curr = leastactive;

		curr->last_received_packet = get_jiffies_64();

		skb_queue_purge(&(curr->skbs));

		dev_put(curr->dev);
	} else {
		curr = kmem_cache_alloc(announce_in_slab,
				GFP_KERNEL);
		if (curr == 0)
			goto discard;

		skb_queue_head_init(&(curr->skbs));
		list_add_tail((struct list_head *) curr, &announce_list);
	}

	curr->packet_version = packet_version;
	curr->total_size = total_size;
	curr->received_size = 0;
	curr->announce_proto_version = announce_proto_version;
	curr->dev = skb->dev;
	dev_hold(curr->dev);
	memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);

found:
	if (_rcv_announce(skb, curr)) {
		/* drop the whole reassembly state, including queued skbs */
		skb_queue_purge(&(curr->skbs));
		list_del((struct list_head *) curr);
		dev_put(curr->dev);
		kmem_cache_free(announce_in_slab, curr);
	}

	if (0) {
discard:
		kfree_skb(skb);
	}

	mutex_unlock(&(neighbor_operation_lock));
}
struct announce {
	struct ref_counter refs;

	__u32 packet_version;
	char *announce_msg;
	__u32 announce_msg_len;
};

struct announce *last_announce;

struct announce_data {
	struct delayed_work announce_work;

	struct net_device *dev;

	struct announce *ann;

	struct list_head lh;

	__u32 curr_announce_msg_offset;
	__u64 scheduled_announce_timer;
};
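/*
 * Send side overview (descriptive sketch of the code below): one
 * announce_data exists per net_device (see announce_sent_adddev()).
 * splitsend_announce() runs as delayed work roughly every 500ms, transmits
 * one chunk of at most 256 bytes of the current announce message via
 * _splitsend_announce() and reschedules itself; scheduled_announce_timer
 * keeps the period stable under scheduling jitter.
 */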
static void _splitsend_announce(struct announce_data *ann)
{
	struct sk_buff *skb;
	__u32 packet_size = 256;
	__u32 remainingdata = ann->ann->announce_msg_len -
			ann->curr_announce_msg_offset;
	__u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
	__u32 overhead = 17 + headroom;
	char *header;
	char *ptr;

	if (remainingdata < packet_size)
		packet_size = remainingdata;

	skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
	if (unlikely(0 == skb))
		return;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, overhead);

	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->dev->broadcast, ann->dev->dev_addr,
			skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	header = skb_put(skb, 17);
	if (unlikely(header == 0))
		goto out_err;

	header[0] = PACKET_TYPE_ANNOUNCE;

	put_u32(header + 1, 0, 1); /* announce proto version */
	put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
	put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
	put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */

	ptr = skb_put(skb, packet_size);
	if (unlikely(ptr == 0))
		goto out_err;

	memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset,
			packet_size);
	dev_queue_xmit(skb);

	ann->curr_announce_msg_offset += packet_size;

	if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
		ann->curr_announce_msg_offset = 0;

	if (0) {
out_err:
		if (skb != 0)
			kfree_skb(skb);
	}
}
static void splitsend_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	int reschedule = 0;

	mutex_lock(&(neighbor_operation_lock));

	if (ann->dev == 0)
		goto out;

	reschedule = 1;

	if (ann->ann == 0 && last_announce == 0)
		goto out;

	if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
		if (ann->ann != 0)
			ref_counter_decr(&(ann->ann->refs));
		ann->ann = last_announce;
		ref_counter_incr(&(ann->ann->refs));
	}

	_splitsend_announce(ann);
out:
	mutex_unlock(&(neighbor_operation_lock));

	if (reschedule) {
		int target_delay_ms = 500;
		int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
		/* named jiffies_now to not shadow the global jiffies */
		__u64 jiffies_now = get_jiffies_64();
		int delay;

		ann->scheduled_announce_timer += target_delay_jiffies;

		delay = ann->scheduled_announce_timer - jiffies_now;
		if (delay < 0)
			delay = 0;

		INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
		schedule_delayed_work(&(ann->announce_work), delay);
	}
}
static void announce_free(struct ref_counter *ref)
{
	struct announce *ann = container_of(ref, struct announce, refs);
	kfree(ann->announce_msg);
	kfree(ann);
}

static struct ref_counter_def announce_ref = {
	.free = announce_free
};
void blacklist_neighbor(struct neighbor *nb)
{
#warning todo
}
static struct announce_data *get_announce_by_netdev(struct net_device *dev)
{
	struct list_head *lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct announce_data *curr = (struct announce_data *)(
				((char *) lh) -
				offsetof(struct announce_data, lh));

		if (curr->dev == dev)
			return curr;

		lh = lh->next;
	}

	return 0;
}
static void announce_sent_adddev(struct net_device *dev)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (ann == 0) {
		printk(KERN_ERR "cor cannot allocate memory for sending "
				"announces\n");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	dev_hold(dev);
	ann->dev = dev;

	mutex_lock(&(neighbor_operation_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	mutex_unlock(&(neighbor_operation_lock));

	ann->scheduled_announce_timer = get_jiffies_64();
	INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
	schedule_delayed_work(&(ann->announce_work), 1);
}
static void announce_sent_rmdev(struct net_device *dev)
{
	struct announce_data *ann;

	mutex_lock(&(neighbor_operation_lock));

	ann = get_announce_by_netdev(dev);

	if (ann == 0)
		goto out;

	dev_put(ann->dev);
	ann->dev = 0;

out:
	mutex_unlock(&(neighbor_operation_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;

	switch (event) {
	case NETDEV_UP:
		announce_sent_adddev(dev);
		break;
	case NETDEV_DOWN:
		announce_sent_rmdev(dev);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}
static int set_announce(char *msg, __u32 len)
{
	struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);

	if (ann == 0) {
		kfree(msg);
		return 1;
	}

	memset(ann, 0, sizeof(struct announce));

	ann->announce_msg = msg;
	ann->announce_msg_len = len;

	ref_counter_init(&(ann->refs), &announce_ref);

	mutex_lock(&(neighbor_operation_lock));

	if (last_announce != 0) {
		ann->packet_version = last_announce->packet_version + 1;
		ref_counter_decr(&(last_announce->refs));
	}

	last_announce = ann;

	mutex_unlock(&(neighbor_operation_lock));

	return 0;
}
static int generate_announce(void)
{
	__u32 addrtypelen = strlen(addrtype);

	__u32 hdr_len = 16;
	__u32 cmd_hdr_len = 8;
	__u32 cmd_len = 2 + 2 + addrtypelen + addrlen;

	__u32 len = hdr_len + cmd_hdr_len + cmd_len;
	__u32 offset = 0;

	char *msg = kmalloc(len, GFP_KERNEL);
	if (msg == 0)
		return 1;

	put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
	offset += 4;

	put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
	offset += 4;
	put_u32(msg + offset, cmd_len, 1); /* command length */
	offset += 4;

	/* addrtypelen, addrlen */
	put_u16(msg + offset, addrtypelen, 1);
	offset += 2;
	put_u16(msg + offset, addrlen, 1);
	offset += 2;

	/* addrtype, addr */
	memcpy(msg + offset, addrtype, addrtypelen);
	offset += addrtypelen;
	memcpy(msg + offset, addr, addrlen);
	offset += addrlen;

	BUG_ON(offset != len);

	return set_announce(msg, len);
}
int __init cor_neighbor_init(void)
{
	addrlen = 16;

	addr = kmalloc(addrlen, GFP_KERNEL);
	if (addr == 0)
		goto error_free2;

	get_random_bytes(addr, addrlen);

	nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
			0, 0);
	announce_in_slab = kmem_cache_create("cor_announce_in",
			sizeof(struct announce_in), 8, 0, 0);
	if (nb_slab == 0 || announce_in_slab == 0)
		goto error_free1;

	if (generate_announce())
		goto error_free1;

	memset(&netdev_notify, 0, sizeof(netdev_notify));
	netdev_notify.notifier_call = netdev_notify_func;
	register_netdevice_notifier(&netdev_notify);

	return 0;

error_free1:
	kfree(addr);

error_free2:
	return -ENOMEM;
}
MODULE_LICENSE("GPL");