/*
 * Connection oriented routing
 * Copyright (C) 2007-2009 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include "cor.h"
/**
 * Split packet data format:
 * announce proto version [4]
 *	is 0, may be increased if the format changes
 * packet version [4]
 *	starts with 0, increments every time the data field changes
 * total size [4]
 *	total data size of all merged packets
 * offset [4]
 *	used to determine the order when merging the split packet
 *	unit is bytes
 * [data]
 * cumulative checksum [8] (not yet)
 *	chunk 1 contains the checksum of the data in chunk 1
 *	chunk 2 contains the checksum of the data in chunks 1+2
 *	...
 *
 * Data format of the announce packet "data" field:
 *	min_announce_proto_version [4]
 *	max_announce_proto_version [4]
 *	min_cor_proto_version [4]
 *	max_cor_proto_version [4]
 *		versions which are understood
 *
 *	command [4]
 *	commandlength [4]
 *	commanddata [commandlength]
 */
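/*
 * Illustrative only (not compiled): the split packet header described
 * above, written out as a packed struct. _splitsend_announce() below
 * additionally prepends a one byte packet type (PACKET_TYPE_ANNOUNCE),
 * for 17 header bytes in total. The struct and field names here are
 * hypothetical.
 */
#if 0
struct split_packet_hdr {
	__be32 announce_proto_version;	/* currently always 0 */
	__be32 packet_version;		/* increments when the data changes */
	__be32 total_size;		/* size of the reassembled message */
	__be32 offset;			/* position of this chunk, in bytes */
} __attribute__((packed));
#endif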
/* Commands */

#define NEIGHCMD_ADDADDR 1

/**
 * Parameter:
 * addrtypelen [2]
 * addrlen [2]
 * addrtype [addrtypelen]
 * addr [addrlen]
 */
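/*
 * Example (illustrative): generate_announce() below emits this command
 * with the module's default address, i.e. addrtypelen = 2, addrlen = 16,
 * addrtype = "id" and a 16 byte random address, giving a command length
 * of 2 + 2 + 2 + 16 = 22 bytes.
 */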
#warning todo
/**
 * "I hear you" data format:
 * challenge [todo]
 */
DEFINE_MUTEX(neighbor_operation_lock);

char *addrtype = "id";
char *addr;
int addrlen;

LIST_HEAD(nb_list);
struct kmem_cache *nb_slab;

LIST_HEAD(announce_out_list);

struct notifier_block netdev_notify;

#define ADDRTYPE_UNKNOWN 0
#define ADDRTYPE_ID 1
static int get_addrtype(__u32 addrtypelen, char *addrtype)
{
	if (addrtypelen == 2 &&
			(addrtype[0] == 'i' || addrtype[0] == 'I') &&
			(addrtype[1] == 'd' || addrtype[1] == 'D'))
		return ADDRTYPE_ID;

	return ADDRTYPE_UNKNOWN;
}
static void neighbor_free(struct ref_counter *ref)
{
	struct neighbor *nb = container_of(ref, struct neighbor, refs);
	BUG_ON(nb->nb_list.next != 0 || nb->nb_list.prev != 0);
	if (nb->addr != 0)
		kfree(nb->addr);
	nb->addr = 0;
	if (nb->dev != 0)
		dev_put(nb->dev);
	nb->dev = 0;
	kmem_cache_free(nb_slab, nb);
}

static struct ref_counter_def neighbor_ref = {
	.free = neighbor_free
};
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);

	__u32 seqno;

	if (nb == 0)
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	ref_counter_init(&(nb->refs), &neighbor_ref);
	mutex_init(&(nb->cmsg_lock));
	/*struct control_msg_out *first_cm;
	struct control_msg_out *last_cm;
	unsigned long timedue;*/
	nb->latency = 10;
	INIT_LIST_HEAD(&(nb->control_msgs_out));
	atomic_set(&(nb->ooo_packets), 0);
	get_random_bytes((char *) &seqno, sizeof(seqno));
	atomic_set(&(nb->kpacket_seqno), seqno);
	mutex_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->snd_conn_list));
	spin_lock_init(&(nb->retrans_lock));
	skb_queue_head_init(&(nb->retrans_list));

	return nb;
}
static void add_neighbor(struct neighbor *nb)
{
	struct list_head *currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == nb->addrlen && memcmp(curr->addr,
				nb->addr, curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}

#warning todo refcnt
	list_add_tail(&(nb->nb_list), &nb_list);
	schedule_controlmsg_timerfunc(nb);
	setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
			(unsigned long) nb);

	if (0) {
already_present:
		kmem_cache_free(nb_slab, nb);
	}
}
/*
 * Returns the neighbor with an incremented reference count; the caller
 * is expected to drop it with ref_counter_decr() when done.
 */
struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
				addrlen) == 0) {
			ret = curr;
			ref_counter_incr(&(ret->refs));

			goto out;
		}

		currlh = currlh->next;
	}

out:
	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
__u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
{
	struct list_head *currlh;

	char *p_totalneighs = buf;
	char *p_response_rows = buf + 4;

	__u32 total = 0;
	__u32 cnt = 0;

	__u32 buf_offset = 8;

	BUG_ON(buf == 0);
	BUG_ON(buflen < 8);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (total != cnt)
			goto cont;

		/*
		 * everything here is unsigned, so check for space by
		 * addition instead of subtraction to avoid underflow
		 */
		if (unlikely(buf_offset + 6 + 2 + curr->addrlen > buflen))
			goto cont;

		put_u16(buf + buf_offset, 1, 1);/* numaddr */
		buf_offset += 2;
		put_u16(buf + buf_offset, 2, 1);/* addrtypelen */
		buf_offset += 2;
		put_u16(buf + buf_offset, curr->addrlen, 1);/* addrlen */
		buf_offset += 2;
		buf[buf_offset] = 'i';	/* addrtype */
		buf_offset += 1;
		buf[buf_offset] = 'd';
		buf_offset += 1;
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		total++;
		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	put_u32(p_totalneighs, total, 1);
	put_u32(p_response_rows, cnt, 1);

	return buf_offset;
}
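/*
 * Illustrative sketch (not compiled): walking the buffer filled by
 * generate_neigh_list(). parse_u32()/parse_u16() stand in for
 * hypothetical big endian read helpers.
 */
#if 0
static void walk_neigh_list(char *buf, __u32 buflen)
{
	__u32 rows = parse_u32(buf + 4);	/* rows present in buf */
	__u32 off = 8;				/* skip total + rows */
	__u32 i;

	for (i = 0; i < rows; i++) {
		__u16 addrtypelen = parse_u16(buf + off + 2);	/* 2, "id" */
		__u16 addrlen = parse_u16(buf + off + 4);
		char *addrtype = buf + off + 6;
		char *addr = buf + off + 6 + addrtypelen;

		off += 6 + addrtypelen + addrlen;
	}
}
#endif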
static __u32 pull_u32(struct sk_buff *skb, int convbo)
{
	char *ptr = cor_pull_skb(skb, 4);

	__u32 ret = 0;

	BUG_ON(0 == ptr);

	/* copy bytewise, the skb data is not necessarily aligned */
	((char *)&ret)[0] = ptr[0];
	((char *)&ret)[1] = ptr[1];
	((char *)&ret)[2] = ptr[2];
	((char *)&ret)[3] = ptr[3];

	if (convbo)
		return be32_to_cpu(ret);
	return ret;
}
static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	__u16 addrtypelen;
	char *addrtype;
	__u16 addrlen;
	char *addr;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (nb->addr != 0)
		return 0;

	if (len < 4)
		return 0;

	addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	if (len < 2)
		return 0;

	addrlen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	/* len is unsigned, so check before subtracting to avoid underflow */
	if (len < ((__u32) addrtypelen) + addrlen)
		return 0;

	addrtype = cmddata;
	cmddata += addrtypelen;
	len -= addrtypelen;

	addr = cmddata;
	cmddata += addrlen;
	len -= addrlen;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	nb->addr = kmalloc(addrlen, GFP_KERNEL);
	if (nb->addr == 0)
		return 1;

	memcpy(nb->addr, addr, addrlen);
	nb->addrlen = addrlen;

	return 0;
}
static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	if (cmd == NEIGHCMD_ADDADDR) {
		apply_announce_addaddr(nb, cmd, len, cmddata);
	} else {
		/* ignore unknown cmds */
	}
}
static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	struct neighbor *nb = alloc_neighbor(GFP_KERNEL);

	if (nb == 0)
		return;

	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packets were rejected by check_announce_cmds() */
		BUG_ON(cmdlen > len);

		apply_announce_cmd(nb, cmd, cmdlen, msg);

		msg += cmdlen;
		len -= cmdlen;
	}

	BUG_ON(len != 0);

	dev_hold(dev);
	nb->dev = dev;
	add_neighbor(nb);
}
static int check_announce_cmds(char *msg, __u32 len)
{
	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packet */
		if (cmdlen > len)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0)
		return 1;

	return 0;
}
static void parse_announce(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	__u32 min_announce_version;
	__u32 max_announce_version;
	__u32 min_cor_version;
	__u32 max_cor_version;

	if (len < 16)
		return;

	min_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	min_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;

	if (min_announce_version != 0)
		return;
	if (min_cor_version != 0)
		return;
	if (check_announce_cmds(msg, len)) {
		return;
	}
	apply_announce_cmds(msg, len, dev, source_hw);
}
struct announce_in {
	/* lh has to be first */
	struct list_head lh;
	struct sk_buff_head skbs; /* sorted by offset */
	struct net_device *dev;
	char source_hw[MAX_ADDR_LEN];
	__u32 announce_proto_version;
	__u32 packet_version;
	__u32 total_size;
	__u32 received_size;
	__u64 last_received_packet;
};

LIST_HEAD(announce_list);

struct kmem_cache *announce_in_slab;
static void merge_announce(struct announce_in *ann)
{
	char *msg = kmalloc(ann->total_size, GFP_KERNEL);
	__u32 copy = 0;

	if (msg == 0) {
		/* try again when the next packet arrives */
		return;
	}

	while (copy != ann->total_size) {
		__u32 currcpy;
		struct sk_buff *skb;

		if (skb_queue_empty(&(ann->skbs))) {
			printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
					"empty while merging packets\n");
			goto free;
		}

		skb = skb_dequeue(&(ann->skbs));

		currcpy = skb->len;

		if (currcpy + copy > ann->total_size) {
			/* do not leak the skb when bailing out */
			kfree_skb(skb);
			goto free;
		}

#warning todo overlapping skbs
		memcpy(msg + copy, skb->data, currcpy);
		copy += currcpy;
		kfree_skb(skb);
	}

	parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);

free:
	if (msg != 0)
		kfree(msg);

	/* drop any skbs that were not merged */
	skb_queue_purge(&(ann->skbs));
	dev_put(ann->dev);
	list_del(&(ann->lh));
	kmem_cache_free(announce_in_slab, ann);
}
static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
{
	struct skb_procstate *ps = skb_pstate(skb);

	__u32 offset = ps->funcstate.announce.offset;
	__u32 len = skb->len;

	__u32 curroffset = 0;
	__u32 prevoffset = 0;
	__u32 prevlen = 0;

	struct sk_buff *curr = ann->skbs.next;

	if (len + offset > ann->total_size) {
		/* invalid header */
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Try to find the right place to insert in the sorted list. This
	 * means to process the list until we find a skb which has a greater
	 * offset, so we can insert before it to keep the sort order. However,
	 * this is complicated by the fact that the new skb must not be
	 * inserted between 2 skbs if there is no data missing in between. So
	 * the loop has to keep running until there is either a gap to insert
	 * into or we see that this data has already been received.
	 */
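
	/*
	 * Example (illustrative): with chunks [0,20) and [60,80) queued,
	 * a new chunk [30,50) is inserted before the [60,80) skb, because
	 * a gap remains on both sides:
	 *
	 *	| 0..20 | gap | new: 30..50 | gap | 60..80 |
	 */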
	while ((void *) curr != (void *) &(ann->skbs)) {
		struct skb_procstate *currps = skb_pstate(curr);

		curroffset = currps->funcstate.announce.offset;

		if (curroffset > offset && (prevoffset + prevlen) < curroffset)
			break;

		prevoffset = curroffset;
		prevlen = curr->len;
		curr = curr->next;
	}

	if ((offset + len) <= (prevoffset + prevlen)) {
		/* we already have this data */
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Calculate how much data was really received, by subtracting
	 * the bytes we already have.
	 */
	if (unlikely(prevoffset + prevlen > offset)) {
		len -= (prevoffset + prevlen) - offset;
		offset = prevoffset + prevlen;
	}

	if (unlikely((void *) curr != (void *) &(ann->skbs) &&
			(offset + len) > curroffset))
		len = curroffset - offset;

	ann->received_size += len;
	BUG_ON(ann->received_size > ann->total_size);
	__skb_queue_before(&(ann->skbs), curr, skb);
	ann->last_received_packet = get_jiffies_64();

	if (ann->received_size == ann->total_size)
		merge_announce(ann);
	else if (ann->skbs.qlen >= 16)
		return 1;

	return 0;
}
void rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct announce_in *curr = 0;
	struct announce_in *leastactive = 0;
	__u32 list_size = 0;

	__u32 announce_proto_version = pull_u32(skb, 1);
	__u32 packet_version = pull_u32(skb, 1);
	__u32 total_size = pull_u32(skb, 1);

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	ps->funcstate.announce.offset = pull_u32(skb, 1);

	mutex_lock(&(neighbor_operation_lock));

	/* goto discard unlocks the mutex, so take it before the checks */
	if (total_size > 8192)
		goto discard;

	if (announce_proto_version != 0)
		goto discard;

	curr = (struct announce_in *) announce_list.next;

	while (((struct list_head *) curr) != &(announce_list)) {
		list_size++;
		if (curr->dev == skb->dev &&
				memcmp(curr->source_hw, source_hw,
						MAX_ADDR_LEN) == 0 &&
				curr->announce_proto_version ==
						announce_proto_version &&
				curr->packet_version == packet_version &&
				curr->total_size == total_size)
			goto found;

		if (leastactive == 0 || curr->last_received_packet <
				leastactive->last_received_packet)
			leastactive = curr;

		curr = (struct announce_in *) curr->lh.next;
	}

	if (list_size >= 128) {
		BUG_ON(leastactive == 0);
		curr = leastactive;

		curr->last_received_packet = get_jiffies_64();

		while (!skb_queue_empty(&(curr->skbs))) {
			struct sk_buff *skb2 = skb_dequeue(&(curr->skbs));
			kfree_skb(skb2);
		}

		dev_put(curr->dev);
	} else {
		curr = kmem_cache_alloc(announce_in_slab,
				GFP_KERNEL);
		if (curr == 0)
			goto discard;

		skb_queue_head_init(&(curr->skbs));
		list_add_tail((struct list_head *) curr, &announce_list);
	}

	curr->packet_version = packet_version;
	curr->total_size = total_size;
	curr->received_size = 0;
	curr->announce_proto_version = announce_proto_version;
	curr->dev = skb->dev;
	dev_hold(curr->dev);
	memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);

found:
	if (_rcv_announce(skb, curr)) {
		list_del((struct list_head *) curr);
		dev_put(curr->dev);
		kmem_cache_free(announce_in_slab, curr);
	}

	if (0) {
discard:
		kfree_skb(skb);
	}

	mutex_unlock(&(neighbor_operation_lock));
}
struct announce {
	struct ref_counter refs;

	__u32 packet_version;
	char *announce_msg;
	__u32 announce_msg_len;
};

struct announce *last_announce;

struct announce_data {
	struct delayed_work announce_work;

	struct net_device *dev;

	struct announce *ann;

	struct list_head lh;

	__u32 curr_announce_msg_offset;
	__u64 scheduled_announce_timer;
};
static void _splitsend_announce(struct announce_data *ann)
{
	struct sk_buff *skb;
	__u32 packet_size = 256;
	__u32 remainingdata = ann->ann->announce_msg_len -
			ann->curr_announce_msg_offset;
	__u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
	__u32 overhead = 17 + headroom;
	char *header;
	char *ptr;

	if (remainingdata < packet_size)
		packet_size = remainingdata;

	skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
	if (unlikely(0 == skb))
		return;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, overhead);

	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->dev->broadcast, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	header = skb_put(skb, 17);
	if (unlikely(header == 0))
		goto out_err;

	header[0] = PACKET_TYPE_ANNOUNCE;

	put_u32(header + 1, 0, 1); /* announce proto version */
	put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
	put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
	put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */

	ptr = skb_put(skb, packet_size);
	if (unlikely(ptr == 0))
		goto out_err;

	memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset,
			packet_size);
	dev_queue_xmit(skb);

	ann->curr_announce_msg_offset += packet_size;

	if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
		ann->curr_announce_msg_offset = 0;

	if (0) {
out_err:
		if (skb != 0)
			kfree_skb(skb);
	}
}
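/*
 * The rescheduling below is computed from scheduled_announce_timer
 * rather than from the current time, so latency in running the work
 * item does not accumulate: on average one announce chunk is sent
 * every 500 ms per device.
 */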
static void splitsend_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	int reschedule = 0;

	mutex_lock(&(neighbor_operation_lock));

	if (ann->dev == 0)
		goto out;

	reschedule = 1;

	if (ann->ann == 0 && last_announce == 0)
		goto out;

	if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
		if (ann->ann != 0)
			ref_counter_decr(&(ann->ann->refs));
		ann->ann = last_announce;
		ref_counter_incr(&(ann->ann->refs));
	}

	_splitsend_announce(ann);
out:
	mutex_unlock(&(neighbor_operation_lock));

	if (reschedule) {
		int target_delay_ms = 500;
		int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
		__u64 jiffies = get_jiffies_64();
		int delay;

		ann->scheduled_announce_timer += target_delay_jiffies;

		delay = ann->scheduled_announce_timer - jiffies;
		if (delay < 0)
			delay = 0;

		INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
		schedule_delayed_work(&(ann->announce_work), delay);
	}
}
static void announce_free(struct ref_counter *ref)
{
	struct announce *ann = container_of(ref, struct announce, refs);
	kfree(ann->announce_msg);
	kfree(ann);
}

static struct ref_counter_def announce_ref = {
	.free = announce_free
};
static struct announce_data *get_announce_by_netdev(struct net_device *dev)
{
	struct list_head *lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct announce_data *curr = (struct announce_data *)(
				((char *) lh) -
				offsetof(struct announce_data, lh));

		if (curr->dev == dev)
			return curr;

		lh = lh->next;
	}

	return 0;
}
static void announce_sent_adddev(struct net_device *dev)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (ann == 0) {
		printk(KERN_ERR "cor cannot allocate memory for sending "
				"announces\n");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	dev_hold(dev);
	ann->dev = dev;

	mutex_lock(&(neighbor_operation_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	mutex_unlock(&(neighbor_operation_lock));

	ann->scheduled_announce_timer = get_jiffies_64();
	INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
	schedule_delayed_work(&(ann->announce_work), 1);
}
static void announce_sent_rmdev(struct net_device *dev)
{
	struct announce_data *ann;

	mutex_lock(&(neighbor_operation_lock));

	ann = get_announce_by_netdev(dev);

	if (ann == 0)
		goto out;

	dev_put(ann->dev);
	ann->dev = 0;

out:
	mutex_unlock(&(neighbor_operation_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;

	switch (event) {
	case NETDEV_UP:
		announce_sent_adddev(dev);
		break;
	case NETDEV_DOWN:
		announce_sent_rmdev(dev);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}
/* takes ownership of msg: it is freed on error and when the announce dies */
static int set_announce(char *msg, __u32 len)
{
	struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);

	if (ann == 0) {
		kfree(msg);
		return 1;
	}

	memset(ann, 0, sizeof(struct announce));

	ann->announce_msg = msg;
	ann->announce_msg_len = len;

	ref_counter_init(&(ann->refs), &announce_ref);

	mutex_lock(&(neighbor_operation_lock));

	if (last_announce != 0) {
		ann->packet_version = last_announce->packet_version + 1;
		ref_counter_decr(&(last_announce->refs));
	}

	last_announce = ann;

	mutex_unlock(&(neighbor_operation_lock));

	return 0;
}
static int generate_announce(void)
{
	__u32 addrtypelen = strlen(addrtype);

	__u32 hdr_len = 16;
	__u32 cmd_hdr_len = 8;
	__u32 cmd_len = 2 + 2 + addrtypelen + addrlen;

	__u32 len = hdr_len + cmd_hdr_len + cmd_len;
	__u32 offset = 0;

	char *msg = kmalloc(len, GFP_KERNEL);
	if (msg == 0)
		return 1;

	put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
	offset += 4;

	put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
	offset += 4;
	put_u32(msg + offset, cmd_len, 1); /* command length */
	offset += 4;

	/* addrtypelen, addrlen */
	put_u16(msg + offset, addrtypelen, 1);
	offset += 2;
	put_u16(msg + offset, addrlen, 1);
	offset += 2;

	/* addrtype, addr */
	memcpy(msg + offset, addrtype, addrtypelen);
	offset += addrtypelen;
	memcpy(msg + offset, addr, addrlen);
	offset += addrlen;

	BUG_ON(offset != len);

	return set_announce(msg, len);
}
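/*
 * Worked example: with the defaults set up in cor_neighbor_init()
 * (addrtype "id", addrlen 16), the generated message is 16 (version
 * header) + 8 (command header) + 2 + 2 + 2 + 16 = 46 bytes, which fits
 * in a single 256 byte announce chunk.
 */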
int __init cor_neighbor_init(void)
{
	addrlen = 16;

	addr = kmalloc(addrlen, GFP_KERNEL);
	if (addr == 0)
		goto error_free2;

	get_random_bytes(addr, addrlen);

	nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
			0, 0);
	announce_in_slab = kmem_cache_create("cor_announce_in",
			sizeof(struct announce_in), 8, 0, 0);

	if (generate_announce())
		goto error_free1;

	memset(&netdev_notify, 0, sizeof(netdev_notify));
	netdev_notify.notifier_call = netdev_notify_func;
	register_netdevice_notifier(&netdev_notify);

	return 0;

error_free1:
	kfree(addr);

error_free2:
	return -ENOMEM;
}

MODULE_LICENSE("GPL");