/*
 * Connection oriented routing
 * Copyright (C) 2007-2009 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include "cor.h"
/**
 * Split packet data format:
 * announce proto version [4]
 *     is 0, may be increased if the format changes
 * packet version [4]
 *     starts with 0, increments every time the data field changes
 * total size [4]
 *     total data size of all merged packets
 * offset [4]
 *     used to determine the order when merging the split packet
 *     unit is bytes
 * [data]
 * cumulative checksum [8] (not yet)
 *     chunk 1 contains the checksum of the data in chunk 1
 *     chunk 2 contains the checksum of the data in chunks 1+2
 *     ...
 *
 * Data format of the announce packet "data" field:
 * min_announce_proto_version [4]
 * max_announce_proto_version [4]
 * min_cor_proto_version [4]
 * max_cor_proto_version [4]
 *     versions which are understood
 *
 * command [4]
 * commandlength [4]
 * commanddata [commandlength]
 */
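/*
 * Illustrative sketch only (not part of the original source): the fixed
 * part of the split packet header described above, written as a packed
 * struct. All fields are big endian on the wire; the struct name is
 * hypothetical.
 */
struct cor_announce_split_hdr {
	__be32 announce_proto_version;	/* 0 for now */
	__be32 packet_version;		/* bumped when the data changes */
	__be32 total_size;		/* size of the fully merged data */
	__be32 offset;			/* byte offset of this chunk */
} __attribute__((packed));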
/* Commands */

#define NEIGHCMD_ADDADDR 1

/**
 * Parameter (in the order the parser below reads it):
 * addrtypelen [2]
 * addrlen [2]
 * addrtype [addrtypelen]
 * addr [addrlen]
 */
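/*
 * Example (matches what generate_announce() below emits): for the "id"
 * addrtype and a 16 byte address, the NEIGHCMD_ADDADDR command body is
 *   00 02 | 00 10 | 'i' 'd' | <16 address bytes>
 * i.e. addrtypelen=2, addrlen=16, followed by the two variable fields.
 */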
#warning todo
/**
 * "I hear you" data format:
 * challenge [todo]
 */
DEFINE_MUTEX(neighbor_operation_lock);

char *addrtype = "id";
char *addr;
int addrlen;

LIST_HEAD(nb_list);
struct kmem_cache *nb_slab;

LIST_HEAD(announce_out_list);

struct notifier_block netdev_notify;
#define ADDRTYPE_UNKNOWN 0
#define ADDRTYPE_ID 1

static int get_addrtype(__u32 addrtypelen, char *addrtype)
{
	if (addrtypelen == 2 &&
			(addrtype[0] == 'i' || addrtype[0] == 'I') &&
			(addrtype[1] == 'd' || addrtype[1] == 'D'))
		return ADDRTYPE_ID;

	return ADDRTYPE_UNKNOWN;
}
static void neighbor_free(struct ref_counter *ref)
{
	struct neighbor *nb = container_of(ref, struct neighbor, refs);
	BUG_ON(nb->nb_list.next != 0 || nb->nb_list.prev != 0);
	if (nb->addr != 0)
		kfree(nb->addr);
	nb->addr = 0;
	if (nb->dev != 0)
		dev_put(nb->dev);
	nb->dev = 0;
	kmem_cache_free(nb_slab, nb);
}

static struct ref_counter_def neighbor_ref = {
	.free = neighbor_free
};
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);
	__u32 seqno;

	if (nb == 0)
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	ref_counter_init(&(nb->refs), &neighbor_ref);
	mutex_init(&(nb->cmsg_lock));
	INIT_LIST_HEAD(&(nb->control_msgs_out));
	nb->last_ping_time = jiffies;
	atomic_set(&(nb->ooo_packets), 0);
	get_random_bytes((char *) &seqno, sizeof(seqno));
	atomic_set(&(nb->kpacket_seqno), seqno);
	atomic_set(&(nb->latency), 0);
	mutex_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->snd_conn_list));
	spin_lock_init(&(nb->retrans_lock));
	skb_queue_head_init(&(nb->retrans_list));

	return nb;
}
struct neighbor *get_neigh_by_mac(struct sk_buff *skb)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0) {
			ret = curr;
			ref_counter_incr(&(ret->refs));

			goto out;
		}

		currlh = currlh->next;
	}

out:
	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
				addrlen) == 0) {
			ret = curr;
			ref_counter_incr(&(ret->refs));

			goto out;
		}

		currlh = currlh->next;
	}

out:
	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
__u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
{
	struct list_head *currlh;

	char *p_totalneighs = buf;
	char *p_response_rows = buf + 4;

	__u32 total = 0;
	__u32 cnt = 0;

	__u32 buf_offset = 8;

	BUG_ON(buf == 0);
	BUG_ON(buflen < 8);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		/* stop emitting rows after the first one which did not fit */
		if (total != cnt)
			goto cont;

		/* 6 bytes row header + 2 bytes addrtype + the address;
		 * compare this way around, the subtraction would underflow */
		if (unlikely(buf_offset + 6 + 2 + curr->addrlen > buflen))
			goto cont;

		put_u16(buf + buf_offset, 1, 1);/* numaddr */
		buf_offset += 2;
		put_u16(buf + buf_offset, 2, 1);/* addrtypelen */
		buf_offset += 2;
		put_u16(buf + buf_offset, curr->addrlen, 1);/* addrlen */
		buf_offset += 2;
		buf[buf_offset] = 'i';	/* addrtype */
		buf_offset += 1;
		buf[buf_offset] = 'd';
		buf_offset += 1;
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		total++;
		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	put_u32(p_totalneighs, total, 1);
	put_u32(p_response_rows, cnt, 1);

	return buf_offset;
}
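/*
 * Sketch (not in the original source) of the buffer layout that
 * generate_neigh_list() produces, all integers big endian:
 *   total neighbors [4] | rows in this response [4]
 *   per row: numaddr [2] | addrtypelen [2] | addrlen [2] |
 *            addrtype [addrtypelen] | addr [addrlen]
 */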
static struct ping_cookie *find_cookie(struct neighbor *nb, __u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &(nb->cookies[i]);
	}
	return 0;
}
void ping_resp(struct neighbor *nb, __u32 cookie, __u32 respdelay)
{
	struct ping_cookie *c = find_cookie(nb, cookie);
	int i;

	__s64 newlatency;

	if (c == 0)
		return;

	/* exponentially weighted moving average, 15/16 old value */
	newlatency = ((((__s64) ((__u32)atomic_read(&(nb->latency)))) * 15 +
			jiffies_to_usecs(jiffies - c->time) - respdelay) / 16);
	if (unlikely(newlatency < 0))
		newlatency = 0;
	if (unlikely(newlatency > (((__s64)256)*256*256*256 - 1)))
		newlatency = ((__s64)256)*256*256*256 - 1;

	atomic_set(&(nb->latency), (__u32) newlatency);

	c->cookie = 0;
	nb->ping_intransit--;

	/* invalidate older cookies which have been skipped too often */
	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie != 0 &&
				time_before(nb->cookies[i].time, c->time)) {
			nb->cookies[i].pongs++;
			if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
				nb->cookies[i].cookie = 0;
				nb->cookies[i].pongs = 0;
				nb->ping_intransit--;
			}
		}
	}
}
__u32 add_ping_req(struct neighbor *nb)
{
	struct ping_cookie *c;
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == 0)
			goto found;
	}

	/* no free slot, overwrite a random non-fifo slot; cast to unsigned
	 * so the modulo cannot produce a negative index */
	get_random_bytes((char *) &i, sizeof(i));
	i = ((__u32) i) % (PING_COOKIES_PER_NEIGH - PING_COOKIES_FIFO) +
			PING_COOKIES_FIFO;

found:
	c = &(nb->cookies[i]);
	c->time = jiffies;
	c->pongs = 0;
	nb->lastcookie++;
	if (unlikely(nb->lastcookie == 0))
		nb->lastcookie++;
	c->cookie = nb->lastcookie;

	nb->ping_intransit++;

	return c->cookie;
}
/**
 * Check in addition to the checks and timings already done in kpacket_gen.c.
 * This is primarily to make sure that we do not invalidate other ping cookies
 * which might still receive responses. It does this by requiring a certain
 * minimum delay between pings, depending on how many pings are already in
 * transit.
 */
int time_to_send_ping(struct neighbor *nb)
{
	if (nb->ping_intransit >= PING_COOKIES_NOTHROTTLE) {
		__u32 mindelay = (((__u32)atomic_read(&(nb->latency)))/1000) <<
				(nb->ping_intransit + 1 -
				PING_COOKIES_NOTHROTTLE);
		if (mindelay > PING_THROTTLE_LIMIT_MS)
			mindelay = PING_THROTTLE_LIMIT_MS;

		if (jiffies_to_msecs(jiffies - nb->last_ping_time) < mindelay)
			return 0;
	}

	return 1;
}
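/*
 * Minimal usage sketch (not part of the original source; the transmit
 * helper name is hypothetical): the ping sender in kpacket_gen.c is
 * expected to pair the helpers above roughly like this.
 */
#if 0
static void try_send_ping(struct neighbor *nb)
{
	__u32 cookie;

	if (!time_to_send_ping(nb))
		return;		/* throttled, try again later */

	cookie = add_ping_req(nb);	/* latency is updated in ping_resp() */
	send_ping(nb, cookie);		/* hypothetical transmit helper */
}
#endif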
static void add_neighbor(struct neighbor *nb)
{
	struct list_head *currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == nb->addrlen && memcmp(curr->addr,
				nb->addr, curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}

#warning todo refcnt
	list_add_tail(&(nb->nb_list), &nb_list);
	schedule_controlmsg_timerfunc(nb);
	setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
			(unsigned long) nb);

	if (0) {
already_present:
		kmem_cache_free(nb_slab, nb);
	}
}
static __u32 pull_u32(struct sk_buff *skb, int convbo)
{
	char *ptr = cor_pull_skb(skb, 4);

	__u32 ret = 0;

	BUG_ON(0 == ptr);

	((char *)&ret)[0] = ptr[0];
	((char *)&ret)[1] = ptr[1];
	((char *)&ret)[2] = ptr[2];
	((char *)&ret)[3] = ptr[3];

	if (convbo)
		return be32_to_cpu(ret);
	return ret;
}
static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	__u16 addrtypelen;
	char *addrtype;
	__u16 addrlen;
	char *addr;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (nb->addr != 0)
		return 0;

	if (len < 4)
		return 0;

	addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	if (len < 2)
		return 0;

	addrlen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	/* len is unsigned, check before subtracting to avoid underflow */
	if (len < ((__u32) addrtypelen) + addrlen)
		return 0;

	addrtype = cmddata;
	cmddata += addrtypelen;
	len -= addrtypelen;

	addr = cmddata;
	cmddata += addrlen;
	len -= addrlen;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	nb->addr = kmalloc(addrlen, GFP_KERNEL);
	if (nb->addr == 0)
		return 1;

	memcpy(nb->addr, addr, addrlen);
	nb->addrlen = addrlen;

	return 0;
}
static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	if (cmd == NEIGHCMD_ADDADDR) {
		apply_announce_addaddr(nb, cmd, len, cmddata);
	} else {
		/* ignore unknown cmds */
	}
}
static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	struct neighbor *nb = alloc_neighbor(GFP_KERNEL);

	if (nb == 0)
		return;

	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* check_announce_cmds() has already validated the lengths */
		BUG_ON(cmdlen > len);

		apply_announce_cmd(nb, cmd, cmdlen, msg);

		msg += cmdlen;
		len -= cmdlen;
	}

	BUG_ON(len != 0);

	memcpy(nb->mac, source_hw, MAX_ADDR_LEN);

	dev_hold(dev);
	nb->dev = dev;
	add_neighbor(nb);
}
static int check_announce_cmds(char *msg, __u32 len)
{
	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packet */
		if (cmdlen > len)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0)
		return 1;

	return 0;
}
static void parse_announce(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	__u32 min_announce_version;
	__u32 max_announce_version;
	__u32 min_cor_version;
	__u32 max_cor_version;

	if (len < 16)
		return;

	min_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	min_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;
	max_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 4;

	if (min_announce_version != 0)
		return;
	if (min_cor_version != 0)
		return;
	if (check_announce_cmds(msg, len))
		return;

	apply_announce_cmds(msg, len, dev, source_hw);
}
struct announce_in {
	/* lh has to be first */
	struct list_head lh;
	struct sk_buff_head skbs; /* sorted by offset */
	struct net_device *dev;
	char source_hw[MAX_ADDR_LEN];
	__u32 announce_proto_version;
	__u32 packet_version;
	__u32 total_size;
	__u32 received_size;
	__u64 last_received_packet;
};

LIST_HEAD(announce_list);

struct kmem_cache *announce_in_slab;
static void merge_announce(struct announce_in *ann)
{
	char *msg = kmalloc(ann->total_size, GFP_KERNEL);
	__u32 copy = 0;

	if (msg == 0) {
		/* try again when next packet arrives */
		return;
	}

	while (copy != ann->total_size) {
		__u32 currcpy;
		struct sk_buff *skb;

		if (skb_queue_empty(&(ann->skbs))) {
			printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
					"empty while merging packets\n");
			goto free;
		}

		skb = skb_dequeue(&(ann->skbs));

		currcpy = skb->len;

		if (currcpy + copy > ann->total_size) {
			kfree_skb(skb);
			goto free;
		}

#warning todo overlapping skbs
		memcpy(msg + copy, skb->data, currcpy);
		copy += currcpy;
		kfree_skb(skb);
	}

	parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);

free:
	if (msg != 0)
		kfree(msg);

	dev_put(ann->dev);
	list_del(&(ann->lh));
	kmem_cache_free(announce_in_slab, ann);
}
static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
{
	struct skb_procstate *ps = skb_pstate(skb);

	__u32 offset = ps->funcstate.announce.offset;
	__u32 len = skb->len;

	__u32 curroffset = 0;
	__u32 prevoffset = 0;
	__u32 prevlen = 0;

	struct sk_buff *curr = ann->skbs.next;

	if (len + offset > ann->total_size) {
		/* invalid header */
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Try to find the right place to insert in the sorted list. This
	 * means to process the list until we find a skb which has a greater
	 * offset, so we can insert before it to keep the sort order. However,
	 * this is complicated by the fact that the new skb must not be
	 * inserted between 2 skbs if there is no data missing in between. So
	 * the loop has to keep running until there is either a gap to insert
	 * at or we see that this data has already been received.
	 */
	while ((void *) curr != (void *) &(ann->skbs)) {
		struct skb_procstate *currps = skb_pstate(curr);

		curroffset = currps->funcstate.announce.offset;

		if (curroffset > offset && (prevoffset + prevlen) < curroffset)
			break;

		prevoffset = curroffset;
		prevlen = curr->len;
		curr = curr->next;

		if ((offset+len) <= (prevoffset+prevlen)) {
			/* we already have this data */
			kfree_skb(skb);
			return 0;
		}
	}

	/*
	 * Calculate how much data was really received, by subtracting
	 * the bytes we already have.
	 */
	if (unlikely(prevoffset + prevlen > offset)) {
		len -= (prevoffset + prevlen) - offset;
		offset = prevoffset + prevlen;
	}

	if (unlikely((void *) curr != (void *) &(ann->skbs) &&
			(offset + len) > curroffset))
		len = curroffset - offset;

	ann->received_size += len;
	BUG_ON(ann->received_size > ann->total_size);
	__skb_queue_before(&(ann->skbs), curr, skb);
	ann->last_received_packet = get_jiffies_64();

	if (ann->received_size == ann->total_size)
		merge_announce(ann);
	else if (ann->skbs.qlen >= 16)
		return 1;

	return 0;
}
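/*
 * Worked example (not in the original source): with chunks [0,100) and
 * [200,300) already queued, a new chunk [150,250) breaks the loop at the
 * [200,300) skb and is inserted before it; its len is first trimmed to
 * the 50 bytes [150,200) that do not overlap the following chunk, so
 * received_size only counts data we did not already have.
 */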
void rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct announce_in *curr = 0;
	struct announce_in *leastactive = 0;
	__u32 list_size = 0;

	__u32 announce_proto_version = pull_u32(skb, 1);
	__u32 packet_version = pull_u32(skb, 1);
	__u32 total_size = pull_u32(skb, 1);

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	ps->funcstate.announce.offset = pull_u32(skb, 1);

	/* the discard path unlocks, so take the lock before any goto */
	mutex_lock(&(neighbor_operation_lock));

	if (total_size > 8192)
		goto discard;

	if (announce_proto_version != 0)
		goto discard;

	curr = (struct announce_in *) announce_list.next;

	while (((struct list_head *) curr) != &(announce_list)) {
		list_size++;
		if (curr->dev == skb->dev &&
				memcmp(curr->source_hw, source_hw,
				MAX_ADDR_LEN) == 0 &&
				curr->announce_proto_version ==
				announce_proto_version &&
				curr->packet_version == packet_version &&
				curr->total_size == total_size)
			goto found;

		if (leastactive == 0 || curr->last_received_packet <
				leastactive->last_received_packet)
			leastactive = curr;

		curr = (struct announce_in *) curr->lh.next;
	}

	if (list_size >= 128) {
		BUG_ON(leastactive == 0);
		curr = leastactive;

		curr->last_received_packet = get_jiffies_64();

		while (!skb_queue_empty(&(curr->skbs))) {
			struct sk_buff *skb2 = skb_dequeue(&(curr->skbs));
			kfree_skb(skb2);
		}

		dev_put(curr->dev);
	} else {
		curr = kmem_cache_alloc(announce_in_slab,
				GFP_KERNEL);
		if (curr == 0)
			goto discard;

		skb_queue_head_init(&(curr->skbs));
		list_add_tail((struct list_head *) curr, &announce_list);
	}

	curr->packet_version = packet_version;
	curr->total_size = total_size;
	curr->received_size = 0;
	curr->announce_proto_version = announce_proto_version;
	curr->dev = skb->dev;
	dev_hold(curr->dev);
	memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);

found:
	if (_rcv_announce(skb, curr)) {
		list_del((struct list_head *) curr);
		dev_put(curr->dev);
		kmem_cache_free(announce_in_slab, curr);
	}

	if (0) {
discard:
		kfree_skb(skb);
	}

	mutex_unlock(&(neighbor_operation_lock));
}
struct announce {
	struct ref_counter refs;

	__u32 packet_version;
	char *announce_msg;
	__u32 announce_msg_len;
};

struct announce *last_announce;

struct announce_data {
	struct delayed_work announce_work;

	struct net_device *dev;

	struct announce *ann;

	struct list_head lh;

	__u32 curr_announce_msg_offset;
	__u64 scheduled_announce_timer;
};
static void _splitsend_announce(struct announce_data *ann)
{
	struct sk_buff *skb;
	__u32 packet_size = 256;
	__u32 remainingdata = ann->ann->announce_msg_len -
			ann->curr_announce_msg_offset;
	__u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
	__u32 overhead = 17 + headroom;
	char *header;
	char *ptr;

	if (remainingdata < packet_size)
		packet_size = remainingdata;

	skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
	if (unlikely(0 == skb))
		return;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, headroom);

	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->dev->broadcast, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	header = skb_put(skb, 17);
	if (unlikely(header == 0))
		goto out_err;

	header[0] = PACKET_TYPE_ANNOUNCE;

	put_u32(header + 1, 0, 1); /* announce proto version */
	put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
	put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
	put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */

	ptr = skb_put(skb, packet_size);
	if (unlikely(ptr == 0))
		goto out_err;

	memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset,
			packet_size);
	dev_queue_xmit(skb);

	ann->curr_announce_msg_offset += packet_size;

	if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
		ann->curr_announce_msg_offset = 0;

	if (0) {
out_err:
		if (skb != 0)
			kfree_skb(skb);
	}
}
static void splitsend_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	int reschedule = 0;

	mutex_lock(&(neighbor_operation_lock));

	if (ann->dev == 0)
		goto out;

	reschedule = 1;

	if (ann->ann == 0 && last_announce == 0)
		goto out;

	if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
		if (ann->ann != 0)
			ref_counter_decr(&(ann->ann->refs));
		ann->ann = last_announce;
		ref_counter_incr(&(ann->ann->refs));
	}

	_splitsend_announce(ann);
out:
	mutex_unlock(&(neighbor_operation_lock));

	if (reschedule) {
		int target_delay_ms = 500;
		int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
		/* do not shadow the global jiffies */
		__u64 jiffies_now = get_jiffies_64();
		int delay;

		ann->scheduled_announce_timer += target_delay_jiffies;

		delay = ann->scheduled_announce_timer - jiffies_now;
		if (delay < 0)
			delay = 0;

		INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
		schedule_delayed_work(&(ann->announce_work), delay);
	}
}
static void announce_free(struct ref_counter *ref)
{
	struct announce *ann = container_of(ref, struct announce, refs);
	kfree(ann->announce_msg);
	kfree(ann);
}

static struct ref_counter_def announce_ref = {
	.free = announce_free
};
static struct announce_data *get_announce_by_netdev(struct net_device *dev)
{
	struct list_head *lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct announce_data *curr = (struct announce_data *)(
				((char *) lh) -
				offsetof(struct announce_data, lh));

		if (curr->dev == dev)
			return curr;

		lh = lh->next;
	}

	return 0;
}
static void announce_sent_adddev(struct net_device *dev)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (ann == 0) {
		printk(KERN_ERR "cor cannot allocate memory for sending "
				"announces");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	dev_hold(dev);
	ann->dev = dev;

	mutex_lock(&(neighbor_operation_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	mutex_unlock(&(neighbor_operation_lock));

	ann->scheduled_announce_timer = get_jiffies_64();
	INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
	schedule_delayed_work(&(ann->announce_work), 1);
}
static void announce_sent_rmdev(struct net_device *dev)
{
	struct announce_data *ann;

	mutex_lock(&(neighbor_operation_lock));

	ann = get_announce_by_netdev(dev);

	if (ann == 0)
		goto out;

	dev_put(ann->dev);
	ann->dev = 0;

out:
	mutex_unlock(&(neighbor_operation_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;

	switch (event) {
	case NETDEV_UP:
		announce_sent_adddev(dev);
		break;
	case NETDEV_DOWN:
		announce_sent_rmdev(dev);
		break;
	case NETDEV_REBOOT:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}
static int set_announce(char *msg, __u32 len)
{
	struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);

	if (ann == 0) {
		kfree(msg);
		return 1;
	}

	memset(ann, 0, sizeof(struct announce));

	ann->announce_msg = msg;
	ann->announce_msg_len = len;

	ref_counter_init(&(ann->refs), &announce_ref);

	mutex_lock(&(neighbor_operation_lock));

	if (last_announce != 0) {
		ann->packet_version = last_announce->packet_version + 1;
		ref_counter_decr(&(last_announce->refs));
	}

	last_announce = ann;

	mutex_unlock(&(neighbor_operation_lock));

	return 0;
}
static int generate_announce(void)
{
	__u32 addrtypelen = strlen(addrtype);

	__u32 hdr_len = 16;
	__u32 cmd_hdr_len = 8;
	__u32 cmd_len = 2 + 2 + addrtypelen + addrlen;

	__u32 len = hdr_len + cmd_hdr_len + cmd_len;
	__u32 offset = 0;

	char *msg = kmalloc(len, GFP_KERNEL);
	if (msg == 0)
		return 1;

	put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
	offset += 4;

	put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
	offset += 4;
	put_u32(msg + offset, cmd_len, 1); /* command length */
	offset += 4;

	/* addrtypelen, addrlen */
	put_u16(msg + offset, addrtypelen, 1);
	offset += 2;
	put_u16(msg + offset, addrlen, 1);
	offset += 2;

	/* addrtype, addr */
	memcpy(msg + offset, addrtype, addrtypelen);
	offset += addrtypelen;
	memcpy(msg + offset, addr, addrlen);
	offset += addrlen;

	BUG_ON(offset != len);

	return set_announce(msg, len);
}
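/*
 * Concrete example (derived from the code above, not in the original
 * source): with addrtype "id" and addrlen 16 as set in
 * cor_neighbor_init(), the generated message is 46 bytes:
 *   4 x 00 00 00 00                   (min/max announce/cor versions)
 *   00 00 00 01                       (NEIGHCMD_ADDADDR)
 *   00 00 00 16                       (command length 22)
 *   00 02 | 00 10 | 'i' 'd' | <16 random address bytes>
 */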
int __init cor_neighbor_init(void)
{
	addrlen = 16;

	addr = kmalloc(addrlen, GFP_KERNEL);
	if (addr == 0)
		goto error_free2;

	get_random_bytes(addr, addrlen);

	nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
			0, 0);
	announce_in_slab = kmem_cache_create("cor_announce_in",
			sizeof(struct announce_in), 8, 0, 0);

	if (generate_announce())
		goto error_free1;

	memset(&netdev_notify, 0, sizeof(netdev_notify));
	netdev_notify.notifier_call = netdev_notify_func;
	register_netdevice_notifier(&netdev_notify);

	return 0;

error_free1:
	kfree(addr);

error_free2:
	return -ENOMEM;
}
MODULE_LICENSE("GPL");