/*
 * Connection oriented routing
 * Copyright (C) 2007-2009 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include "cor.h"

/**
 * Split packet data format:
 * announce proto version [4]
 *	is 0, may be increased if the format changes
 * packet version [4]
 *	starts with 0, increments every time the data field changes
 * total size [4]
 *	total data size of all merged packets
 * offset [4]
 *	used to determine the order when merging the split packets
 *	unit is bytes
 * [data]
 * cumulative checksum [8] (not yet)
 *	chunk 1 contains the checksum of the data in chunk 1
 *	chunk 2 contains the checksum of the data in chunks 1+2
 *	...
 *
 * Data format of the announce packet "data" field:
 * min_announce_proto_version [4]
 * max_announce_proto_version [4]
 * min_cor_proto_version [4]
 * max_cor_proto_version [4]
 *	versions which are understood
 *
 * command [4]
 * commandlength [4]
 * commanddata [commandlength]
 */

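/*
 * Illustrative example (not part of the original format description): a
 * 600-byte announce message sent in 256-byte pieces would carry this header
 * in front of its second chunk:
 *	announce proto version = 0
 *	packet version = n (unchanged as long as the data is unchanged)
 *	total size = 600
 *	offset = 256
 * followed by bytes 256..511 of the announce message.
 */
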
/* Commands */

#define NEIGHCMD_ADDADDR 1

/**
 * Parameter:
 * addrtypelen [2]
 * addrlen [2]
 * addrtype [addrtypelen]
 * addr [addrlen]
 */

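/*
 * Illustrative example (not part of the original comment): for the 16-byte
 * random "id" address generated in cor_neighbor_init(), the command data is
 *	addrtypelen = 2 | addrlen = 16 | 'i' 'd' | <16 address bytes>
 * i.e. 2 + 2 + 2 + 16 = 22 bytes, matching what generate_announce() writes
 * and apply_announce_addaddr() reads below.
 */
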
#warning todo

/**
 * "I hear you" data format:
 * challenge [todo]
 */

DEFINE_MUTEX(neighbor_operation_lock);

char *addrtype = "id";
char *addr;
int addrlen;


LIST_HEAD(nb_list);
struct kmem_cache *nb_slab;

LIST_HEAD(announce_out_list);

struct notifier_block netdev_notify;


#define ADDRTYPE_UNKNOWN 0
#define ADDRTYPE_ID 1

static int get_addrtype(__u32 addrtypelen, char *addrtype)
{
	if (addrtypelen == 2 &&
			(addrtype[0] == 'i' || addrtype[0] == 'I') &&
			(addrtype[1] == 'd' || addrtype[1] == 'D'))
		return ADDRTYPE_ID;

	return ADDRTYPE_UNKNOWN;
}

static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);

	if (nb == 0)
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	spin_lock_init(&(nb->cmsg_lock));
	/*struct control_msg_out *first_cm;
	struct control_msg_out *last_cm;
	unsigned long timedue;*/
	nb->latency = 10;
	mutex_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->snd_conn_list));
	spin_lock_init(&(nb->retrans_lock));

	skb_queue_head_init(&(nb->retrans_list));

	return nb;
}

static void add_neighbor(struct neighbor *nb)
{
	struct list_head *currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == nb->addrlen && memcmp(curr->addr, nb->addr,
				curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}

	list_add_tail(&(nb->nb_list), &nb_list);
	setup_timer(&(nb->cmsg_timer), controlmsg_timerfunc,
			(unsigned long) nb);
	setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
			(unsigned long) nb);

	if (0) {
already_present:
		kmem_cache_free(nb_slab, nb);
	}
}

struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
				addrlen) == 0) {
#warning todo refcnt
			ret = curr;
			goto out;
		}

		currlh = currlh->next;
	}

out:
	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}

__u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
{
	struct list_head *currlh;

	char *p_totalneighs = buf;
	char *p_response_rows = buf + 4;

	__u32 total = 0;
	__u32 cnt = 0;

	__u32 buf_offset = 8;

	BUG_ON(buf == 0);
	BUG_ON(buflen < 8);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (total != cnt)
			goto cont;

		/* buflen and buf_offset are unsigned, so test by addition
		 * instead of a subtraction which could wrap around */
		if (buf_offset + 6 + 2 + curr->addrlen > buflen)
			goto cont;

		put_u16(buf + buf_offset, 1, 1);/* numaddr */
		buf_offset += 2;
		put_u16(buf + buf_offset, 2, 1);/* addrtypelen */
		buf_offset += 2;
		put_u16(buf + buf_offset, curr->addrlen, 1);/* addrlen */
		buf_offset += 2;
		buf[buf_offset] = 'i'; /* addrtype */
		buf_offset += 1;
		buf[buf_offset] = 'd';
		buf_offset += 1;
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		total++;
		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	put_u32(p_totalneighs, total, 1);
	put_u32(p_response_rows, cnt, 1);

	return buf_offset;
}

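/*
 * Illustrative example (not part of the original source): with two neighbors
 * that both have 16-byte "id" addresses and a large enough buffer,
 * generate_neigh_list() produces
 *	total [4] | rows [4] |
 *	(numaddr=1 [2] | addrtypelen=2 [2] | addrlen=16 [2] | "id" | addr [16])
 * per row, i.e. 8 + 2*24 = 56 bytes, and returns 56.
 */
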
static __u32 pull_u32(struct sk_buff *skb, int convbo)
{
	char *ptr = cor_pull_skb(skb, 4);

	__u32 ret = 0;

	BUG_ON(0 == ptr);

	((char *)&ret)[0] = ptr[0];
	((char *)&ret)[1] = ptr[1];
	((char *)&ret)[2] = ptr[2];
	((char *)&ret)[3] = ptr[3];

	if (convbo)
		return be32_to_cpu(ret);
	return ret;
}

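/*
 * Minimal sketch of a matching 16-bit accessor; it is not used below and
 * assumes cor_pull_skb() behaves exactly as in pull_u32() above.
 */
static inline __u16 pull_u16(struct sk_buff *skb, int convbo)
{
	char *ptr = cor_pull_skb(skb, 2);

	__u16 ret = 0;

	BUG_ON(0 == ptr);

	((char *)&ret)[0] = ptr[0];
	((char *)&ret)[1] = ptr[1];

	if (convbo)
		return be16_to_cpu(ret);
	return ret;
}
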
static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	__u16 addrtypelen;
	char *addrtype;
	__u16 addrlen;
	char *addr;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (nb->addr != 0)
		return 0;

	if (len < 4)
		return 0;

	addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	if (len < 2)
		return 0;

	addrlen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	/* len is unsigned, so check the remaining length before consuming
	 * it instead of testing for a negative result afterwards */
	if (len < ((__u32) addrtypelen) + addrlen)
		return 0;

	addrtype = cmddata;
	cmddata += addrtypelen;

	addr = cmddata;
	cmddata += addrlen;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	nb->addr = kmalloc(addrlen, GFP_KERNEL);
	if (nb->addr == 0)
		return 1;

	memcpy(nb->addr, addr, addrlen);
	nb->addrlen = addrlen;

	return 0;
}

static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	if (cmd == NEIGHCMD_ADDADDR) {
		apply_announce_addaddr(nb, cmd, len, cmddata);
	} else {
		/* ignore unknown cmds */
	}
}

static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	struct neighbor *nb = alloc_neighbor(GFP_KERNEL);

	if (nb == 0)
		return;

	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packets should have been rejected by
		 * check_announce_cmds() already */
		BUG_ON(cmdlen > len);

		apply_announce_cmd(nb, cmd, cmdlen, msg);

		msg += cmdlen;
		len -= cmdlen;
	}

	BUG_ON(len != 0);

	dev_hold(dev);
	nb->dev = dev;
	add_neighbor(nb);
}

static int check_announce_cmds(char *msg, __u32 len)
{
	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packet */
		if (cmdlen > len)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0)
		return 1;

	return 0;
}

static void parse_announce(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	__u32 min_announce_version;
	__u32 max_announce_version;
	__u32 min_cor_version;
	__u32 max_cor_version;

	if (len < 16)
		return;

	min_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	min_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;

	if (min_announce_version != 0)
		return;
	if (min_cor_version != 0)
		return;

	if (check_announce_cmds(msg, len))
		return;

	apply_announce_cmds(msg, len, dev, source_hw);
}

struct announce_in {
	/* lh has to be first */
	struct list_head lh;
	struct sk_buff_head skbs; /* sorted by offset */
	struct net_device *dev;
	char source_hw[MAX_ADDR_LEN];
	__u32 announce_proto_version;
	__u32 packet_version;
	__u32 total_size;
	__u32 received_size;
	__u64 last_received_packet;
};

LIST_HEAD(announce_list);

struct kmem_cache *announce_in_slab;

static void merge_announce(struct announce_in *ann)
{
	char *msg = kmalloc(ann->total_size, GFP_KERNEL);
	__u32 copy = 0;

	if (msg == 0) {
		/* try again when next packet arrives */
		return;
	}

	while (copy != ann->total_size) {
		__u32 currcpy;
		struct sk_buff *skb;

		if (skb_queue_empty(&(ann->skbs))) {
			printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
					"empty while merging packets\n");
			goto free;
		}

		skb = skb_dequeue(&(ann->skbs));

		currcpy = skb->len;

		if (currcpy + copy > ann->total_size)
			goto free;

#warning todo overlapping skbs
		memcpy(msg + copy, skb->data, currcpy);
		copy += currcpy;
		kfree_skb(skb);
	}

	parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);

free:
	if (msg != 0)
		kfree(msg);

	dev_put(ann->dev);
	list_del(&(ann->lh));
	kmem_cache_free(announce_in_slab, ann);
}

static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
{
	struct skb_procstate *ps = skb_pstate(skb);

	__u32 offset = ps->funcstate.announce.offset;
	__u32 len = skb->len;

	__u32 curroffset = 0;
	__u32 prevoffset = 0;
	__u32 prevlen = 0;

	struct sk_buff *curr = ann->skbs.next;

	if (len + offset > ann->total_size) {
		/* invalid header */
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Try to find the right place to insert in the sorted list. This
	 * means to process the list until we find a skb which has a greater
	 * offset, so we can insert before it to keep the sort order. However,
	 * this is complicated by the fact that the new skb must not be
	 * inserted between 2 skbs if there is no data missing in between. So
	 * the loop has to keep running until there is either a gap to insert
	 * at or we see that this data has already been received.
	 */
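	/*
	 * Worked example (illustrative): if the queue holds chunks covering
	 * [0,100) and [200,300), a new chunk covering [150,250) is inserted
	 * before the [200,300) chunk and its accounted length is trimmed to
	 * [150,200), so received_size only grows by the 50 genuinely new
	 * bytes.
	 */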
	while ((void *) curr != (void *) &(ann->skbs)) {
		struct skb_procstate *currps = skb_pstate(curr);

		curroffset = currps->funcstate.announce.offset;

		if (curroffset > offset && (prevoffset + prevlen) < curroffset)
			break;

		prevoffset = curroffset;
		prevlen = curr->len;
		curr = curr->next;
	}

	if ((offset+len) <= (prevoffset+prevlen)) {
		/* we already have this data */
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Calculate how much data was really received, by subtracting
	 * the bytes we already have.
	 */
	if (unlikely(prevoffset + prevlen > offset)) {
		len -= (prevoffset + prevlen) - offset;
		offset = prevoffset + prevlen;
	}

	if (unlikely((void *) curr != (void *) &(ann->skbs) &&
			(offset + len) > curroffset))
		len = curroffset - offset;

	ann->received_size += len;
	BUG_ON(ann->received_size > ann->total_size);
	__skb_queue_before(&(ann->skbs), curr, skb);
	ann->last_received_packet = get_jiffies_64();

	if (ann->received_size == ann->total_size)
		merge_announce(ann);
	else if (ann->skbs.qlen >= 16)
		return 1;

	return 0;
}

void rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct announce_in *curr = 0;
	struct announce_in *leastactive = 0;
	__u32 list_size = 0;

	__u32 announce_proto_version = pull_u32(skb, 1);
	__u32 packet_version = pull_u32(skb, 1);
	__u32 total_size = pull_u32(skb, 1);

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	ps->funcstate.announce.offset = pull_u32(skb, 1);

	mutex_lock(&(neighbor_operation_lock));

	/* checked after taking the lock, because discard: drops it */
	if (total_size > 8192)
		goto discard;

	if (announce_proto_version != 0)
		goto discard;

	curr = (struct announce_in *) announce_list.next;

	while (((struct list_head *) curr) != &(announce_list)) {
		list_size++;
		if (curr->dev == skb->dev && memcmp(curr->source_hw,
				source_hw, MAX_ADDR_LEN) == 0 &&
				curr->announce_proto_version ==
				announce_proto_version &&
				curr->packet_version == packet_version &&
				curr->total_size == total_size)
			goto found;

		if (leastactive == 0 || curr->last_received_packet <
				leastactive->last_received_packet)
			leastactive = curr;

		curr = (struct announce_in *) curr->lh.next;
	}

	if (list_size >= 128) {
		BUG_ON(leastactive == 0);
		curr = leastactive;

		curr->last_received_packet = get_jiffies_64();

		while (!skb_queue_empty(&(curr->skbs))) {
			struct sk_buff *skb2 = skb_dequeue(&(curr->skbs));
			kfree_skb(skb2);
		}

		dev_put(curr->dev);
	} else {
		curr = kmem_cache_alloc(announce_in_slab, GFP_KERNEL);
		if (curr == 0)
			goto discard;

		skb_queue_head_init(&(curr->skbs));
		list_add_tail((struct list_head *) curr, &announce_list);
	}

	curr->packet_version = packet_version;
	curr->total_size = total_size;
	curr->received_size = 0;
	curr->announce_proto_version = announce_proto_version;
	curr->dev = skb->dev;
	dev_hold(curr->dev);
	memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);

found:
	if (_rcv_announce(skb, curr)) {
		list_del((struct list_head *) curr);
		dev_put(curr->dev);
		kmem_cache_free(announce_in_slab, curr);
	}

	if (0) {
discard:
		kfree_skb(skb);
	}

	mutex_unlock(&(neighbor_operation_lock));
}

struct announce {
	struct ref_counter refs;

	__u32 packet_version;
	char *announce_msg;
	__u32 announce_msg_len;
};

struct announce *last_announce;

struct announce_data {
	struct delayed_work announce_work;

	struct net_device *dev;

	struct announce *ann;

	struct list_head lh;

	__u32 curr_announce_msg_offset;
	__u64 scheduled_announce_timer;
};

static void _splitsend_announce(struct announce_data *ann)
{
	struct sk_buff *skb;
	__u32 packet_size = 256;
	__u32 remainingdata = ann->ann->announce_msg_len -
			ann->curr_announce_msg_offset;
	__u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
	__u32 overhead = 17 + headroom;
	char *header;
	char *ptr;

	if (remainingdata < packet_size)
		packet_size = remainingdata;

	skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
	if (unlikely(0 == skb))
		return;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, overhead);

	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->dev->broadcast, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	header = skb_put(skb, 17);
	if (unlikely(header == 0))
		goto out_err;

	header[0] = PACKET_TYPE_ANNOUNCE;

	put_u32(header + 1, 0, 1); /* announce proto version */
	put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
	put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
	put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */

	ptr = skb_put(skb, packet_size);
	if (unlikely(ptr == 0))
		goto out_err;

	memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset,
			packet_size);
	dev_queue_xmit(skb);

	ann->curr_announce_msg_offset += packet_size;

	if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
		ann->curr_announce_msg_offset = 0;

	if (0) {
out_err:
		if (skb != 0)
			kfree_skb(skb);
	}
}

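/*
 * Illustrative walkthrough (not part of the original source): for a 600-byte
 * announce message, successive calls transmit chunks with
 *	offset 0 (256 bytes), offset 256 (256 bytes), offset 512 (88 bytes),
 * after which curr_announce_msg_offset wraps to 0 and the announce repeats.
 */
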
static void splitsend_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	int reschedule = 0;

	mutex_lock(&(neighbor_operation_lock));

	if (ann->dev == 0)
		goto out;

	reschedule = 1;

	if (ann->ann == 0 && last_announce == 0)
		goto out;

	if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
		if (ann->ann != 0)
			ref_counter_decr(&(ann->ann->refs));
		ann->ann = last_announce;
		ref_counter_incr(&(ann->ann->refs));
	}

	_splitsend_announce(ann);
out:
	mutex_unlock(&(neighbor_operation_lock));

	if (reschedule) {
		int target_delay_ms = 100;
		int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
		__u64 jiffies = get_jiffies_64();
		int delay;

		ann->scheduled_announce_timer += target_delay_jiffies;

		delay = ann->scheduled_announce_timer - jiffies;
		if (delay < 0)
			delay = 0;

		INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
		schedule_delayed_work(&(ann->announce_work), delay);
	}
}

static void announce_free(struct ref_counter *ref)
{
	struct announce *ann = container_of(ref, struct announce, refs);
	kfree(ann->announce_msg);
	kfree(ann);
}

static struct ref_counter_def announce_ref = {
	.free = announce_free
};

void blacklist_neighbor(struct neighbor *nb)
{
#warning todo
}

static struct announce_data *get_announce_by_netdev(struct net_device *dev)
{
	struct list_head *lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct announce_data *curr = container_of(lh,
				struct announce_data, lh);

		if (curr->dev == dev)
			return curr;

		lh = lh->next;
	}

	return 0;
}

static void announce_sent_adddev(struct net_device *dev)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (ann == 0) {
		printk(KERN_ERR "cor: cannot allocate memory for sending "
				"announces\n");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	dev_hold(dev);
	ann->dev = dev;

	mutex_lock(&(neighbor_operation_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	mutex_unlock(&(neighbor_operation_lock));

	ann->scheduled_announce_timer = get_jiffies_64();
	INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
	schedule_delayed_work(&(ann->announce_work), 1);
}

static void announce_sent_rmdev(struct net_device *dev)
{
	struct announce_data *ann;

	mutex_lock(&(neighbor_operation_lock));

	ann = get_announce_by_netdev(dev);

	if (ann == 0)
		goto out;

	dev_put(ann->dev);
	ann->dev = 0;

out:
	mutex_unlock(&(neighbor_operation_lock));
}

int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;

	switch (event) {
	case NETDEV_UP:
		announce_sent_adddev(dev);
		break;
	case NETDEV_DOWN:
		announce_sent_rmdev(dev);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}

static int set_announce(char *msg, __u32 len)
{
	struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);

	if (ann == 0) {
		kfree(msg);
		return 1;
	}

	memset(ann, 0, sizeof(struct announce));

	ann->announce_msg = msg;
	ann->announce_msg_len = len;

	ref_counter_init(&(ann->refs), &announce_ref);

	mutex_lock(&(neighbor_operation_lock));

	if (last_announce != 0) {
		ann->packet_version = last_announce->packet_version + 1;
		ref_counter_decr(&(last_announce->refs));
	}

	last_announce = ann;

	mutex_unlock(&(neighbor_operation_lock));

	return 0;
}

static int generate_announce(void)
{
	__u32 addrtypelen = strlen(addrtype);

	__u32 hdr_len = 16;
	__u32 cmd_hdr_len = 8;
	__u32 cmd_len = 2 + 2 + addrtypelen + addrlen;

	__u32 len = hdr_len + cmd_hdr_len + cmd_len;
	__u32 offset = 0;

	char *msg = kmalloc(len, GFP_KERNEL);
	if (msg == 0)
		return 1;

	put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
	offset += 4;


	put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
	offset += 4;
	put_u32(msg + offset, cmd_len, 1); /* command length */
	offset += 4;

	/* addrtypelen, addrlen */
	put_u16(msg + offset, addrtypelen, 1);
	offset += 2;
	put_u16(msg + offset, addrlen, 1);
	offset += 2;

	/* addrtype, addr */
	memcpy(msg + offset, addrtype, addrtypelen);
	offset += addrtypelen;
	memcpy(msg + offset, addr, addrlen);
	offset += addrlen;

	BUG_ON(offset != len);

	return set_announce(msg, len);
}

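/*
 * Illustrative size check (not part of the original source): with the 2-byte
 * addrtype "id" and the 16-byte address set up in cor_neighbor_init(), the
 * generated message is 16 (version header) + 8 (command header) +
 * 2 + 2 + 2 + 16 = 46 bytes long.
 */
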
int __init cor_neighbor_init(void)
{
	addrlen = 16;

	addr = kmalloc(addrlen, GFP_KERNEL);
	if (addr == 0)
		goto error_free2;

	get_random_bytes(addr, addrlen);

	nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
			0, 0);
	announce_in_slab = kmem_cache_create("cor_announce_in",
			sizeof(struct announce_in), 8, 0, 0);

	if (generate_announce())
		goto error_free1;

	memset(&netdev_notify, 0, sizeof(netdev_notify));
	netdev_notify.notifier_call = netdev_notify_func;
	register_netdevice_notifier(&netdev_notify);

	return 0;

error_free1:
	kfree(addr);

error_free2:
	return -ENOMEM;
}

MODULE_LICENSE("GPL");