net/cor/neighbor.c
/*
 * Connection oriented routing
 * Copyright (C) 2007-2009 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include "cor.h"
/**
 * Split packet data format:
 * announce proto version [4]
 *  is 0, may be increased if the format changes
 * packet version [4]
 *  starts with 0, increments every time the data field changes
 * total size [4]
 *  total data size of all merged packets
 * offset [4]
 *  used to determine the order when merging the split packets
 *  unit is bytes
 * [data]
 * cumulative checksum [8] (not yet implemented)
 *  chunk 1 contains the checksum of the data in chunk 1
 *  chunk 2 contains the checksum of the data in chunks 1+2
 *  ...
 *
 * Data format of the announce packet "data" field:
 * min_announce_proto_version [4]
 * max_announce_proto_version [4]
 * min_cor_proto_version [4]
 * max_cor_proto_version [4]
 *  versions which are understood
 *
 * command [4]
 * commandlength [4]
 * commanddata [commandlength]
 */
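/*
 * Illustrative sketch (added for clarity, not part of the original code):
 * the fixed 16-byte split-packet header described above, viewed as a
 * packed struct. All fields are big endian on the wire; the code below
 * reads and writes them with pull_u32()/put_u32() instead of using a
 * struct.
 */
struct cor_announce_split_hdr_example {
	__be32 announce_proto_version;	/* always 0 for now */
	__be32 packet_version;		/* bumped when the announce data changes */
	__be32 total_size;		/* size of all chunks merged together */
	__be32 offset;			/* byte offset of this chunk's data */
} __attribute__((packed));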
/* Commands */

#define NEIGHCMD_ADDADDR 1
/**
 * Parameter:
 * addrtypelen [2]
 * addrlen [2]
 * addrtype [addrtypelen]
 * addr [addrlen]
 *
 * (both length fields come first, then the data; this is the order used by
 * generate_announce() and apply_announce_addaddr())
 */
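/*
 * Illustrative sketch (added, not part of the original code): encoding a
 * NEIGHCMD_ADDADDR command body in the layout above, with the put_u16()
 * helper the rest of this file uses. The real encoder is
 * generate_announce() below; this is only a worked example of the layout.
 */
static inline __u32 example_encode_addaddr(char *buf, const char *type,
		__u16 typelen, const char *addrdata, __u16 addrdatalen)
{
	__u32 offset = 0;

	put_u16(buf + offset, typelen, 1);	/* addrtypelen */
	offset += 2;
	put_u16(buf + offset, addrdatalen, 1);	/* addrlen */
	offset += 2;
	memcpy(buf + offset, type, typelen);	/* addrtype, e.g. "id" */
	offset += typelen;
	memcpy(buf + offset, addrdata, addrdatalen);	/* addr */
	offset += addrdatalen;

	return offset;	/* == 2 + 2 + typelen + addrdatalen */
}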
#warning todo
/**
 * "I hear you" data format:
 * challenge [todo]
 */
DEFINE_MUTEX(neighbor_operation_lock);

char *addrtype = "id";
char *addr;
int addrlen;

LIST_HEAD(nb_list);
struct kmem_cache *nb_slab;

LIST_HEAD(announce_out_list);

struct notifier_block netdev_notify;

#define ADDRTYPE_UNKNOWN 0
#define ADDRTYPE_ID 1
static int get_addrtype(__u32 addrtypelen, char *addrtype)
{
	if (addrtypelen == 2 &&
			(addrtype[0] == 'i' || addrtype[0] == 'I') &&
			(addrtype[1] == 'd' || addrtype[1] == 'D'))
		return ADDRTYPE_ID;

	return ADDRTYPE_UNKNOWN;
}
static void neighbor_free(struct ref_counter *ref)
{
	struct neighbor *nb = container_of(ref, struct neighbor, refs);
	BUG_ON(nb->nb_list.next != 0 || nb->nb_list.prev != 0);
	if (nb->addr != 0)
		kfree(nb->addr);
	nb->addr = 0;
	if (nb->dev != 0)
		dev_put(nb->dev);
	nb->dev = 0;
	kmem_cache_free(nb_slab, nb);
}

static struct ref_counter_def neighbor_ref = {
	.free = neighbor_free
};
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);

	__u32 seqno;

	if (nb == 0)
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	ref_counter_init(&(nb->refs), &neighbor_ref);
	mutex_init(&(nb->cmsg_lock));
	INIT_LIST_HEAD(&(nb->control_msgs_out));
	nb->last_ping_time = jiffies;
	atomic_set(&(nb->ooo_packets), 0);
	get_random_bytes((char *) &seqno, sizeof(seqno));
	atomic_set(&(nb->kpacket_seqno), seqno);
	nb->latency = 10;
	mutex_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->snd_conn_list));
	spin_lock_init(&(nb->retrans_lock));
	skb_queue_head_init(&(nb->retrans_list));

	return nb;
}
struct neighbor *get_neigh_by_mac(struct sk_buff *skb)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0) {
			ret = curr;
			ref_counter_incr(&(ret->refs));

			goto out;
		}

		currlh = currlh->next;
	}

out:
	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
				addrlen) == 0) {
			ret = curr;
			ref_counter_incr(&(ret->refs));

			goto out;
		}

		currlh = currlh->next;
	}

out:
	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
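/*
 * Buffer layout produced by generate_neigh_list() (documented from the
 * code below):
 * total neighbors [4]
 * response rows [4]
 * per row:
 *  numaddr [2] (always 1 here)
 *  addrtypelen [2] (always 2)
 *  addrlen [2]
 *  addrtype [2] ("id")
 *  addr [addrlen]
 */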
__u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
{
	struct list_head *currlh;

	char *p_totalneighs = buf;
	char *p_response_rows = buf + 4;

	__u32 total = 0;
	__u32 cnt = 0;

	__u32 buf_offset = 8;

	BUG_ON(buf == 0);
	BUG_ON(buflen < 8);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (total != cnt)
			goto cont;

		/* buflen is unsigned, so test for remaining space instead of
		 * checking for a negative difference */
		if (unlikely(buf_offset + 6 + 2 + curr->addrlen > buflen))
			goto cont;

		put_u16(buf + buf_offset, 1, 1);/* numaddr */
		buf_offset += 2;
		put_u16(buf + buf_offset, 2, 1);/* addrtypelen */
		buf_offset += 2;
		put_u16(buf + buf_offset, curr->addrlen, 1);/* addrlen */
		buf_offset += 2;
		buf[buf_offset] = 'i';	/* addrtype */
		buf_offset += 1;
		buf[buf_offset] = 'd';
		buf_offset += 1;
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		total++;
		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	put_u32(p_totalneighs, total, 1);
	put_u32(p_response_rows, cnt, 1);

	return buf_offset;
}
static struct cookie *find_cookie(struct neighbor *nb, __u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &(nb->cookies[i]);
	}

	return 0;
}
#warning todo
void ping_resp(struct neighbor *nb, __u32 cookie, __u32 respdelay)
{
}

__u32 add_ping_req(struct neighbor *nb)
{
	return 1;
}
/**
 * Checks in addition to the checks and timings already done in
 * kpacket_gen.c. This is primarily to make sure that we do not invalidate
 * other ping cookies which might still receive responses. It does this by
 * requiring a certain minimum delay between pings, depending on how many
 * pings are already in transit.
 */
int time_to_send_ping(struct neighbor *nb)
{
	if (nb->ping_intransit >= PING_COOKIES_NOTHROTTLE) {
		__u32 mindelay = (nb->latency/1000) <<
				(nb->ping_intransit + 1 -
				PING_COOKIES_NOTHROTTLE);
		if (mindelay > PING_THROTTLE_LIMIT_MS)
			mindelay = PING_THROTTLE_LIMIT_MS;

		if (jiffies_to_msecs(jiffies - nb->last_ping_time) < mindelay)
			return 0;
	}

	return 1;
}
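/*
 * Worked example (added; the concrete numbers are assumptions): if
 * nb->latency is 100000us and PING_COOKIES_NOTHROTTLE is 10, then with
 * 10 pings in transit mindelay is (100000/1000) << 1 = 200ms, with 11 it
 * is 400ms, and so on, doubling until PING_THROTTLE_LIMIT_MS caps it.
 */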
static void add_neighbor(struct neighbor *nb)
{
	struct list_head *currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == nb->addrlen && memcmp(curr->addr,
				nb->addr, curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}
#warning todo refcnt
	list_add_tail(&(nb->nb_list), &nb_list);
	schedule_controlmsg_timerfunc(nb);
	setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
			(unsigned long) nb);

	if (0) {
already_present:
		kmem_cache_free(nb_slab, nb);
	}
}
static __u32 pull_u32(struct sk_buff *skb, int convbo)
{
	char *ptr = cor_pull_skb(skb, 4);

	__u32 ret = 0;

	BUG_ON(0 == ptr);

	((char *)&ret)[0] = ptr[0];
	((char *)&ret)[1] = ptr[1];
	((char *)&ret)[2] = ptr[2];
	((char *)&ret)[3] = ptr[3];

	if (convbo)
		return be32_to_cpu(ret);
	return ret;
}
static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	__u16 addrtypelen;
	char *addrtype;
	__u16 addrlen;
	char *addr;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (nb->addr != 0)
		return 0;

	if (len < 4)
		return 0;

	addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	if (len < 2)
		return 0;

	addrlen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	/* len is unsigned: validate before subtracting */
	if (((__u32) addrtypelen) + addrlen > len)
		return 0;

	addrtype = cmddata;
	cmddata += addrtypelen;
	len -= addrtypelen;

	addr = cmddata;
	cmddata += addrlen;
	len -= addrlen;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	nb->addr = kmalloc(addrlen, GFP_KERNEL);
	if (nb->addr == 0)
		return 1;

	memcpy(nb->addr, addr, addrlen);
	nb->addrlen = addrlen;

	return 0;
}
static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	if (cmd == NEIGHCMD_ADDADDR) {
		apply_announce_addaddr(nb, cmd, len, cmddata);
	} else {
		/* ignore unknown cmds */
	}
}
static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	struct neighbor *nb = alloc_neighbor(GFP_KERNEL);

	if (nb == 0)
		return;

	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* the framing has already been validated by
		 * check_announce_cmds() */
		BUG_ON(cmdlen > len);

		apply_announce_cmd(nb, cmd, cmdlen, msg);

		msg += cmdlen;
		len -= cmdlen;
	}

	BUG_ON(len != 0);

	memcpy(nb->mac, source_hw, MAX_ADDR_LEN);

	dev_hold(dev);
	nb->dev = dev;
	add_neighbor(nb);
}
static int check_announce_cmds(char *msg, __u32 len)
{
	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packet */
		if (cmdlen > len)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0)
		return 1;

	return 0;
}
static void parse_announce(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	__u32 min_announce_version;
	__u32 max_announce_version;
	__u32 min_cor_version;
	__u32 max_cor_version;

	if (len < 16)
		return;

	min_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	min_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;

	if (min_announce_version != 0)
		return;
	if (min_cor_version != 0)
		return;
	if (check_announce_cmds(msg, len)) {
		return;
	}
	apply_announce_cmds(msg, len, dev, source_hw);
}
struct announce_in {
	/* lh has to be first */
	struct list_head lh;
	struct sk_buff_head skbs; /* sorted by offset */
	struct net_device *dev;
	char source_hw[MAX_ADDR_LEN];
	__u32 announce_proto_version;
	__u32 packet_version;
	__u32 total_size;
	__u32 received_size;
	__u64 last_received_packet;
};

LIST_HEAD(announce_list);

struct kmem_cache *announce_in_slab;
static void merge_announce(struct announce_in *ann)
{
	char *msg = kmalloc(ann->total_size, GFP_KERNEL);
	__u32 copy = 0;

	if (msg == 0) {
		/* try again when the next packet arrives */
		return;
	}

	while (copy != ann->total_size) {
		__u32 currcpy;
		struct sk_buff *skb;

		if (skb_queue_empty(&(ann->skbs))) {
			printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
					"empty while merging packets\n");
			goto free;
		}

		skb = skb_dequeue(&(ann->skbs));

		currcpy = skb->len;

		if (currcpy + copy > ann->total_size) {
			kfree_skb(skb);
			goto free;
		}

#warning todo overlapping skbs
		memcpy(msg + copy, skb->data, currcpy);
		copy += currcpy;
		kfree_skb(skb);
	}

	parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);

free:
	if (msg != 0)
		kfree(msg);

	dev_put(ann->dev);
	list_del(&(ann->lh));
	kmem_cache_free(announce_in_slab, ann);
}
static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
{
	struct skb_procstate *ps = skb_pstate(skb);

	__u32 offset = ps->funcstate.announce.offset;
	__u32 len = skb->len;

	__u32 curroffset = 0;
	__u32 prevoffset = 0;
	__u32 prevlen = 0;

	struct sk_buff *curr = ann->skbs.next;

	if (len + offset > ann->total_size) {
		/* invalid header */
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Try to find the right place to insert in the sorted list. This
	 * means to process the list until we find an skb which has a greater
	 * offset, so we can insert before it to keep the sort order. However,
	 * this is complicated by the fact that the new skb must not be
	 * inserted between 2 skbs if there is no data missing in between. So
	 * the loop has to keep running until there is either a gap to insert
	 * at, or we see that this data has already been received.
	 */
	while ((void *) curr != (void *) &(ann->skbs)) {
		struct skb_procstate *currps = skb_pstate(curr);

		curroffset = currps->funcstate.announce.offset;

		if (curroffset > offset && (prevoffset + prevlen) < curroffset)
			break;

		prevoffset = curroffset;
		prevlen = curr->len;
		curr = curr->next;

		if ((offset+len) <= (prevoffset+prevlen)) {
			/* we already have this data */
			kfree_skb(skb);
			return 0;
		}
	}

	/*
	 * Calculate how much data was really received, by subtracting
	 * the bytes we already have.
	 */
	if (unlikely(prevoffset + prevlen > offset)) {
		len -= (prevoffset + prevlen) - offset;
		offset = prevoffset + prevlen;
	}

	if (unlikely((void *) curr != (void *) &(ann->skbs) &&
			(offset + len) > curroffset))
		len = curroffset - offset;

	ann->received_size += len;
	BUG_ON(ann->received_size > ann->total_size);
	__skb_queue_before(&(ann->skbs), curr, skb);
	ann->last_received_packet = get_jiffies_64();

	if (ann->received_size == ann->total_size)
		merge_announce(ann);
	else if (ann->skbs.qlen >= 16)
		return 1;

	return 0;
}
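/*
 * Worked example (added): suppose chunks (offset 0, len 256) and
 * (offset 384, len 128) are already queued and a chunk (offset 192,
 * len 256) arrives. The loop above stops at the second chunk, the front
 * overlap moves the new chunk's offset up to 256, the tail overlap cuts
 * its len down to 128, and received_size grows by exactly the 128
 * genuinely new bytes (offsets 256..383).
 */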
void rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct announce_in *curr = 0;
	struct announce_in *leastactive = 0;
	__u32 list_size = 0;

	__u32 announce_proto_version = pull_u32(skb, 1);
	__u32 packet_version = pull_u32(skb, 1);
	__u32 total_size = pull_u32(skb, 1);

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	ps->funcstate.announce.offset = pull_u32(skb, 1);

	mutex_lock(&(neighbor_operation_lock));

	if (total_size > 8192)
		goto discard;

	if (announce_proto_version != 0)
		goto discard;

	curr = (struct announce_in *) announce_list.next;

	while (((struct list_head *) curr) != &(announce_list)) {
		list_size++;
		if (curr->dev == skb->dev &&
				memcmp(curr->source_hw, source_hw,
						MAX_ADDR_LEN) == 0 &&
				curr->announce_proto_version ==
						announce_proto_version &&
				curr->packet_version == packet_version &&
				curr->total_size == total_size)
			goto found;

		if (leastactive == 0 || curr->last_received_packet <
				leastactive->last_received_packet)
			leastactive = curr;

		curr = (struct announce_in *) curr->lh.next;
	}

	if (list_size >= 128) {
		BUG_ON(leastactive == 0);
		curr = leastactive;

		curr->last_received_packet = get_jiffies_64();

		while (!skb_queue_empty(&(curr->skbs))) {
			struct sk_buff *skb2 = skb_dequeue(&(curr->skbs));
			kfree_skb(skb2);
		}

		dev_put(curr->dev);
	} else {
		curr = kmem_cache_alloc(announce_in_slab,
				GFP_KERNEL);
		if (curr == 0)
			goto discard;

		skb_queue_head_init(&(curr->skbs));
		list_add_tail((struct list_head *) curr, &announce_list);
	}

	curr->packet_version = packet_version;
	curr->total_size = total_size;
	curr->received_size = 0;
	curr->announce_proto_version = announce_proto_version;
	curr->dev = skb->dev;
	dev_hold(curr->dev);
	memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);

found:
	if (_rcv_announce(skb, curr)) {
		list_del((struct list_head *) curr);
		dev_put(curr->dev);
		kmem_cache_free(announce_in_slab, curr);
	}

	if (0) {
discard:
		kfree_skb(skb);
	}

	mutex_unlock(&(neighbor_operation_lock));
}
struct announce {
	struct ref_counter refs;

	__u32 packet_version;
	char *announce_msg;
	__u32 announce_msg_len;
};

struct announce *last_announce;

struct announce_data {
	struct delayed_work announce_work;

	struct net_device *dev;

	struct announce *ann;

	struct list_head lh;

	__u32 curr_announce_msg_offset;
	__u64 scheduled_announce_timer;
};
static void _splitsend_announce(struct announce_data *ann)
{
	struct sk_buff *skb;
	__u32 packet_size = 256;
	__u32 remainingdata = ann->ann->announce_msg_len -
			ann->curr_announce_msg_offset;
	__u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
	__u32 overhead = 17 + headroom;
	char *header;
	char *ptr;

	if (remainingdata < packet_size)
		packet_size = remainingdata;

	skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
	if (unlikely(0 == skb))
		return;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, headroom);

	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->dev->broadcast, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	header = skb_put(skb, 17);
	if (unlikely(header == 0))
		goto out_err;

	header[0] = PACKET_TYPE_ANNOUNCE;

	put_u32(header + 1, 0, 1); /* announce proto version */
	put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
	put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
	put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */

	ptr = skb_put(skb, packet_size);
	if (unlikely(ptr == 0))
		goto out_err;

	memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset,
			packet_size);
	dev_queue_xmit(skb);

	ann->curr_announce_msg_offset += packet_size;

	if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
		ann->curr_announce_msg_offset = 0;

	if (0) {
out_err:
		if (skb != 0)
			kfree_skb(skb);
	}
}
static void splitsend_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	int reschedule = 0;

	mutex_lock(&(neighbor_operation_lock));

	if (ann->dev == 0)
		goto out;

	reschedule = 1;

	if (ann->ann == 0 && last_announce == 0)
		goto out;

	if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
		if (ann->ann != 0)
			ref_counter_decr(&(ann->ann->refs));
		ann->ann = last_announce;
		ref_counter_incr(&(ann->ann->refs));
	}

	_splitsend_announce(ann);
out:
	mutex_unlock(&(neighbor_operation_lock));

	if (reschedule) {
		int target_delay_ms = 500;
		int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
		__u64 now = get_jiffies_64();
		int delay;

		ann->scheduled_announce_timer += target_delay_jiffies;

		delay = ann->scheduled_announce_timer - now;
		if (delay < 0)
			delay = 0;

		INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
		schedule_delayed_work(&(ann->announce_work), delay);
	}
}
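/*
 * Design note (added): scheduled_announce_timer is advanced by a fixed
 * 500ms worth of jiffies per round, and the next delay is computed against
 * this absolute schedule. Scheduling latency therefore does not
 * accumulate: a late run simply gets a shorter (or zero) delay before the
 * next broadcast.
 */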
static void announce_free(struct ref_counter *ref)
{
	struct announce *ann = container_of(ref, struct announce, refs);
	kfree(ann->announce_msg);
	kfree(ann);
}

static struct ref_counter_def announce_ref = {
	.free = announce_free
};
static struct announce_data *get_announce_by_netdev(struct net_device *dev)
{
	struct list_head *lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct announce_data *curr = (struct announce_data *)(
				((char *) lh) -
				offsetof(struct announce_data, lh));

		if (curr->dev == dev)
			return curr;

		lh = lh->next;
	}

	return 0;
}
static void announce_sent_adddev(struct net_device *dev)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (ann == 0) {
		printk(KERN_ERR "cor: cannot allocate memory for sending "
				"announces\n");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	dev_hold(dev);
	ann->dev = dev;

	mutex_lock(&(neighbor_operation_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	mutex_unlock(&(neighbor_operation_lock));

	ann->scheduled_announce_timer = get_jiffies_64();
	INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
	schedule_delayed_work(&(ann->announce_work), 1);
}
static void announce_sent_rmdev(struct net_device *dev)
{
	struct announce_data *ann;

	mutex_lock(&(neighbor_operation_lock));

	ann = get_announce_by_netdev(dev);

	if (ann == 0)
		goto out;

	dev_put(ann->dev);
	ann->dev = 0;

out:
	mutex_unlock(&(neighbor_operation_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;

	switch (event) {
	case NETDEV_UP:
		announce_sent_adddev(dev);
		break;
	case NETDEV_DOWN:
		announce_sent_rmdev(dev);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}
static int set_announce(char *msg, __u32 len)
{
	struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);

	if (ann == 0) {
		kfree(msg);
		return 1;
	}

	memset(ann, 0, sizeof(struct announce));

	ann->announce_msg = msg;
	ann->announce_msg_len = len;

	ref_counter_init(&(ann->refs), &announce_ref);

	mutex_lock(&(neighbor_operation_lock));

	if (last_announce != 0) {
		ann->packet_version = last_announce->packet_version + 1;
		ref_counter_decr(&(last_announce->refs));
	}

	last_announce = ann;

	mutex_unlock(&(neighbor_operation_lock));

	return 0;
}
static int generate_announce(void)
{
	__u32 addrtypelen = strlen(addrtype);

	__u32 hdr_len = 16;
	__u32 cmd_hdr_len = 8;
	__u32 cmd_len = 2 + 2 + addrtypelen + addrlen;

	__u32 len = hdr_len + cmd_hdr_len + cmd_len;
	__u32 offset = 0;

	char *msg = kmalloc(len, GFP_KERNEL);
	if (msg == 0)
		return 1;

	put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
	offset += 4;

	put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
	offset += 4;
	put_u32(msg + offset, cmd_len, 1); /* command length */
	offset += 4;

	/* addrtypelen, addrlen */
	put_u16(msg + offset, addrtypelen, 1);
	offset += 2;
	put_u16(msg + offset, addrlen, 1);
	offset += 2;

	/* addrtype, addr */
	memcpy(msg + offset, addrtype, addrtypelen);
	offset += addrtypelen;
	memcpy(msg + offset, addr, addrlen);
	offset += addrlen;

	BUG_ON(offset != len);

	return set_announce(msg, len);
}
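/*
 * Worked example (added): with addrtype "id" (addrtypelen 2) and the
 * 16-byte random address chosen in cor_neighbor_init(), cmd_len is
 * 2 + 2 + 2 + 16 = 22 bytes, so the complete announce message is
 * 16 (version header) + 8 (command header) + 22 = 46 bytes.
 */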
int __init cor_neighbor_init(void)
{
	addrlen = 16;

	addr = kmalloc(addrlen, GFP_KERNEL);
	if (addr == 0)
		goto error_free2;

	get_random_bytes(addr, addrlen);

	nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
			0, 0);
	announce_in_slab = kmem_cache_create("cor_announce_in",
			sizeof(struct announce_in), 8, 0, 0);

	if (generate_announce())
		goto error_free1;

	memset(&netdev_notify, 0, sizeof(netdev_notify));
	netdev_notify.notifier_call = netdev_notify_func;
	register_netdevice_notifier(&netdev_notify);

	return 0;

error_free1:
	kfree(addr);

error_free2:
	return -ENOMEM;
}

MODULE_LICENSE("GPL");