/**
 * Connection oriented routing
 * Copyright (C) 2007-2009 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
/*
 * Split packet data format:
 * announce proto version [4]
 *      is 0, may be increased if the format changes
 * packet version [4]
 *      starts with 0, increments every time the data field changes
 * total size [4]
 *      total data size of all merged packets
 * offset [4]
 *      used to determine the order when merging the split packets
 * data [total size]
 * cumulative checksum [8] (not yet)
 *      chunk 1 contains the checksum of the data in chunk 1
 *      chunk 2 contains the checksum of the data in chunk 1+2
 *      ...
 */
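/*
 * Illustrative sketch, not part of the original source: laid out as byte
 * offsets, the 17-byte header that _splitsend_announce() below writes in
 * front of each chunk looks like this (all fields big endian):
 *
 *      byte 0          packet type (PACKET_TYPE_ANNOUNCE)
 *      bytes 1-4       announce proto version (currently always 0)
 *      bytes 5-8       packet version
 *      bytes 9-12      total size
 *      bytes 13-16     offset of this chunk within the announce message
 */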
/*
 * Data format of the announce packet "data" field:
 * min_announce_proto_version [4]
 * max_announce_proto_version [4]
 * min_cor_proto_version [4]
 * max_cor_proto_version [4]
 *      versions which are understood
 *
 * command [4]
 * command length [4]
 * commanddata [commandlength]
 */
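/*
 * Worked example (illustrative, assuming the 16-byte random address
 * generated in cor_neighbor_init() below): generate_announce() emits
 * 16 bytes of version fields, followed by one NEIGHCMD_ADDADDR command
 * with an 8-byte command header and 2+2+2+16 = 22 bytes of command data,
 * i.e. a 46-byte announce message in total.
 */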
#define NEIGHCMD_ADDADDR 1

/*
 * Command data format:
 * addrtypelen [2]
 * addrlen [2]
 * addrtype [addrtypelen]
 * addr [addrlen]
 */

/*
 * "I hear you" data format:
 */
DEFINE_MUTEX(neighbor_operation_lock);

char *addrtype = "id";
char *addr;
__u32 addrlen;

LIST_HEAD(nb_list);
struct kmem_cache *nb_slab;

LIST_HEAD(announce_out_list);

struct notifier_block netdev_notify;
#define ADDRTYPE_UNKNOWN 0
#define ADDRTYPE_ID 1

static int get_addrtype(__u32 addrtypelen, char *addrtype)
{
        if (addrtypelen == 2 &&
                        (addrtype[0] == 'i' || addrtype[0] == 'I') &&
                        (addrtype[1] == 'd' || addrtype[1] == 'D'))
                return ADDRTYPE_ID;

        return ADDRTYPE_UNKNOWN;
}
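/*
 * Illustrative note, not from the original source: the comparison is
 * case-insensitive, so "id", "ID", "Id" and "iD" all map to ADDRTYPE_ID;
 * any other length or spelling yields ADDRTYPE_UNKNOWN.
 */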
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
        struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);

        if (unlikely(nb == 0))
                return 0;

        memset(nb, 0, sizeof(struct neighbor));

        spin_lock_init(&(nb->cmsg_lock));
        /*struct control_msg_out *first_cm;
        struct control_msg_out *last_cm;
        unsigned long timedue;*/
        mutex_init(&(nb->conn_list_lock));
        INIT_LIST_HEAD(&(nb->rcv_conn_list));
        INIT_LIST_HEAD(&(nb->snd_conn_list));
        spin_lock_init(&(nb->retrans_lock));
        skb_queue_head_init(&(nb->retrans_list));

        return nb;
}
static void add_neighbor(struct neighbor *nb)
{
        struct list_head *currlh = nb_list.next;

        BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

        while (currlh != &nb_list) {
                struct neighbor *curr = container_of(currlh, struct neighbor,
                                nb_list);

                if (curr->addrlen == nb->addrlen && memcmp(curr->addr, nb->addr,
                                curr->addrlen) == 0)
                        goto already_present;

                currlh = currlh->next;
        }

        list_add_tail(&(nb->nb_list), &nb_list);
        setup_timer(&(nb->cmsg_timer), controlmsg_timerfunc,
                        (unsigned long) nb);
        setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
                        (unsigned long) nb);

        return;

already_present:
        kmem_cache_free(nb_slab, nb);
}
struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
                __u16 addrlen, __u8 *addr)
{
        struct list_head *currlh;
        struct neighbor *ret = 0;

        if (get_addrtype(addrtypelen, (char *) addrtype) != ADDRTYPE_ID)
                return 0;

        mutex_lock(&(neighbor_operation_lock));

        currlh = nb_list.next;

        while (currlh != &nb_list) {
                struct neighbor *curr = container_of(currlh, struct neighbor,
                                nb_list);

                if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
                                addrlen) == 0) {
                        ret = curr;
                        break;
                }

                currlh = currlh->next;
        }

        mutex_unlock(&(neighbor_operation_lock));

        return ret;
}
__u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
{
        struct list_head *currlh;

        char *p_totalneighs = buf;
        char *p_response_rows = buf + 4;

        __u32 total = 0;
        __u32 cnt = 0;

        __u32 buf_offset = 8;

        mutex_lock(&(neighbor_operation_lock));

        currlh = nb_list.next;

        while (currlh != &nb_list) {
                struct neighbor *curr = container_of(currlh, struct neighbor,
                                nb_list);

                if (buflen < buf_offset + 6 + 2 + curr->addrlen)
                        break;

                put_u16(buf + buf_offset, 1, 1);/* numaddr */
                buf_offset += 2;
                put_u16(buf + buf_offset, 2, 1);/* addrtypelen */
                buf_offset += 2;
                put_u16(buf + buf_offset, curr->addrlen, 1);/* addrlen */
                buf_offset += 2;
                buf[buf_offset] = 'i'; /* addrtype */
                buf_offset += 1;
                buf[buf_offset] = 'd';
                buf_offset += 1;
                memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
                buf_offset += curr->addrlen;

                BUG_ON(buf_offset > buflen);

                cnt++;
                total++;

                currlh = currlh->next;
        }

        mutex_unlock(&(neighbor_operation_lock));

        put_u32(p_totalneighs, total, 1);
        put_u32(p_response_rows, cnt, 1);

        return buf_offset;
}
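/*
 * Illustrative sketch, an assumption drawn from the code above rather than
 * from any original documentation: the response buffer written by
 * generate_neigh_list() looks like
 *      total neighbors [4] | response rows [4] | row 1 | row 2 | ...
 * where each row is
 *      numaddr [2] | addrtypelen [2] | addrlen [2] | "id" | addr [addrlen]
 */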
static __u32 pull_u32(struct sk_buff *skb, int convbo)
{
        char *ptr = cor_pull_skb(skb, 4);

        __u32 ret = 0;

        BUG_ON(ptr == 0);

        ((char *)&ret)[0] = ptr[0];
        ((char *)&ret)[1] = ptr[1];
        ((char *)&ret)[2] = ptr[2];
        ((char *)&ret)[3] = ptr[3];

        if (convbo)
                return be32_to_cpu(ret);
        return ret;
}
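/*
 * Illustrative example, not from the original source: for the on-wire
 * bytes 00 00 01 02, pull_u32(skb, 1) converts from big endian and
 * returns 258 on any host, while pull_u32(skb, 0) returns the four bytes
 * as they sit in memory, without byte order conversion.
 */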
static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
                char *cmddata)
{
        __u16 addrtypelen;
        char *addrtype;
        __u16 addrlen;
        char *addr;

        BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

        if (len < 4)
                return 0;

        addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
        cmddata += 2;
        len -= 2;

        addrlen = be16_to_cpu(*((__u16 *) cmddata));
        cmddata += 2;
        len -= 2;

        if (len < ((__u32) addrtypelen) + addrlen)
                return 0;

        addrtype = cmddata;
        cmddata += addrtypelen;
        addr = cmddata;

        if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
                return 0;

        nb->addr = kmalloc(addrlen, GFP_KERNEL);
        if (unlikely(nb->addr == 0))
                return 1;

        memcpy(nb->addr, addr, addrlen);
        nb->addrlen = addrlen;

        return 0;
}
static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
                char *cmddata)
{
        if (cmd == NEIGHCMD_ADDADDR) {
                apply_announce_addaddr(nb, cmd, len, cmddata);
        } else {
                /* ignore unknown cmds */
        }
}
static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
                char *source_hw)
{
        struct neighbor *nb = alloc_neighbor(GFP_KERNEL);

        if (unlikely(nb == 0))
                return;

        while (len >= 8) {
                __u32 cmd;
                __u32 cmdlen;

                cmd = be32_to_cpu(*((__u32 *) msg));
                msg += 4;
                len -= 4;
                cmdlen = be32_to_cpu(*((__u32 *) msg));
                msg += 4;
                len -= 4;

                BUG_ON(cmdlen > len);

                apply_announce_cmd(nb, cmd, cmdlen, msg);

                msg += cmdlen;
                len -= cmdlen;
        }

        BUG_ON(len != 0);

        nb->dev = dev;
        memcpy(nb->source_hw, source_hw, MAX_ADDR_LEN);
        add_neighbor(nb);
}
static int check_announce_cmds(char *msg, __u32 len)
{
        while (len >= 8) {
                __u32 cmdlen;

                /* skip the command field */
                msg += 4;
                len -= 4;
                cmdlen = be32_to_cpu(*((__u32 *) msg));
                msg += 4;
                len -= 4;

                /* malformed packet */
                if (cmdlen > len)
                        return 1;

                msg += cmdlen;
                len -= cmdlen;
        }

        if (len != 0)
                return 1;

        return 0;
}
static void parse_announce(char *msg, __u32 len, struct net_device *dev,
                char *source_hw)
{
        __u32 min_announce_version;
        __u32 max_announce_version;
        __u32 min_cor_version;
        __u32 max_cor_version;

        if (len < 16)
                return;

        min_announce_version = be32_to_cpu(*((__u32 *) msg));
        msg += 4;
        max_announce_version = be32_to_cpu(*((__u32 *) msg));
        msg += 4;
        min_cor_version = be32_to_cpu(*((__u32 *) msg));
        msg += 4;
        max_cor_version = be32_to_cpu(*((__u32 *) msg));
        msg += 4;
        len -= 16;

        if (min_announce_version != 0)
                return;
        if (min_cor_version != 0)
                return;
        if (check_announce_cmds(msg, len))
                return;

        apply_announce_cmds(msg, len, dev, source_hw);
}
struct announce_in {
        /* lh has to be first */
        struct list_head lh;
        struct sk_buff_head skbs; /* sorted by offset */
        struct net_device *dev;
        char source_hw[MAX_ADDR_LEN];
        __u32 announce_proto_version;
        __u32 packet_version;
        __u32 total_size;
        __u32 received_size;
        __u64 last_received_packet;
};

LIST_HEAD(announce_list);

struct kmem_cache *announce_in_slab;
static void merge_announce(struct announce_in *ann)
{
        char *msg = kmalloc(ann->total_size, GFP_KERNEL);
        __u32 copy = 0;

        if (msg == 0) {
                /* try again when the next packet arrives */
                return;
        }

        while (copy != ann->total_size) {
                __u32 currcpy;
                struct sk_buff *skb;

                if (skb_queue_empty(&(ann->skbs))) {
                        printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
                                        "empty while merging packets\n");
                        goto free;
                }

                skb = skb_dequeue(&(ann->skbs));

                currcpy = skb->len;
                if (currcpy + copy > ann->total_size) {
                        kfree_skb(skb);
                        goto free;
                }

#warning todo overlapping skbs
                memcpy(msg + copy, skb->data, currcpy);
                copy += currcpy;
                kfree_skb(skb);
        }

        parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);

free:
        kfree(msg);

        list_del(&(ann->lh));
        kmem_cache_free(announce_in_slab, ann);
}
static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
{
        struct skb_procstate *ps = skb_pstate(skb);

        __u32 offset = ps->funcstate.announce.offset;
        __u32 len = skb->len;

        __u32 curroffset = 0;
        __u32 prevoffset = 0;
        __u32 prevlen = 0;

        struct sk_buff *curr = ann->skbs.next;

        if (len + offset > ann->total_size) {
                /* invalid packet */
                kfree_skb(skb);
                return 0;
        }
        /*
         * Try to find the right place to insert in the sorted list. This
         * means processing the list until we find a skb which has a greater
         * offset, so we can insert before it to keep the sort order. However,
         * this is complicated by the fact that the new skb must not be
         * inserted between 2 skbs if there is no data missing in between. So
         * the loop has to keep running until there is either a gap to insert
         * into, or we see that this data has already been received.
         */
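        /*
         * Worked example (illustrative, not from the original source):
         * suppose the queue holds chunks covering [0,256) and [512,768) and
         * a chunk with offset 256 and length 256 arrives. The loop stops at
         * the [512,768) skb, because its offset (512) is greater than 256
         * and there is a gap behind [0,256); the new skb is then inserted
         * between the two, closing the gap.
         */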
        while ((void *) curr != (void *) &(ann->skbs)) {
                struct skb_procstate *currps = skb_pstate(curr);

                curroffset = currps->funcstate.announce.offset;

                if (curroffset > offset && (prevoffset + prevlen) < curroffset)
                        break;

                prevoffset = curroffset;
                prevlen = curr->len;
                curr = curr->next;
        }

        if ((offset + len) <= (prevoffset + prevlen)) {
                /* we already have this data */
                kfree_skb(skb);
                return 0;
        }
        /*
         * Calculate how much data was really received, by subtracting
         * the bytes we already have.
         */
        if (unlikely(prevoffset + prevlen > offset)) {
                len -= (prevoffset + prevlen) - offset;
                offset = prevoffset + prevlen;
        }
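        /*
         * Numeric example (illustrative): if [0,10) is already queued and a
         * chunk with offset 6, len 8 arrives, the overlap of 4 bytes is
         * trimmed, leaving offset 10, len 4, so received_size only counts
         * genuinely new bytes.
         */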
        if (unlikely((void *) curr != (void *) &(ann->skbs) &&
                        (offset + len) > curroffset))
                len = curroffset - offset;

        ann->received_size += len;
        BUG_ON(ann->received_size > ann->total_size);
        __skb_queue_before(&(ann->skbs), curr, skb);
        ann->last_received_packet = get_jiffies_64();

        if (ann->received_size == ann->total_size)
                merge_announce(ann);
        else if (ann->skbs.qlen >= 16)
                return 1;

        return 0;
}
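/*
 * Note on resource limits (summarizing the checks above and in
 * rcv_announce() below; the rationale is an assumption, not stated in the
 * original source): an announce may be at most 8192 bytes, at most 16
 * out-of-order skbs are queued per announce, and at most 128 partially
 * received announces are tracked at once, with the least recently active
 * one recycled first. This bounds the memory an unauthenticated sender
 * can tie up.
 */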
void rcv_announce(struct sk_buff *skb)
{
        struct skb_procstate *ps = skb_pstate(skb);
        struct announce_in *curr = 0;
        struct announce_in *leastactive = 0;
        __u32 list_size = 0;

        __u32 announce_proto_version = pull_u32(skb, 1);
        __u32 packet_version = pull_u32(skb, 1);
        __u32 total_size = pull_u32(skb, 1);

        char source_hw[MAX_ADDR_LEN];
        memset(source_hw, 0, MAX_ADDR_LEN);
        if (skb->dev->header_ops != 0 &&
                        skb->dev->header_ops->parse != 0)
                skb->dev->header_ops->parse(skb, source_hw);

        ps->funcstate.announce.offset = pull_u32(skb, 1);

        if (total_size > 8192) {
                kfree_skb(skb);
                return;
        }

        mutex_lock(&(neighbor_operation_lock));

        if (announce_proto_version != 0)
                goto discard;

        curr = (struct announce_in *) announce_list.next;

        while (((struct list_head *) curr) != &(announce_list)) {
                list_size++;

                if (curr->dev == skb->dev &&
                        memcmp(curr->source_hw, source_hw, MAX_ADDR_LEN) == 0 &&
                        curr->announce_proto_version == announce_proto_version &&
                        curr->packet_version == packet_version &&
                        curr->total_size == total_size)
                        goto found;

                if (leastactive == 0 || curr->last_received_packet <
                                leastactive->last_received_packet)
                        leastactive = curr;

                curr = (struct announce_in *) curr->lh.next;
        }

        if (list_size >= 128) {
                BUG_ON(leastactive == 0);
                curr = leastactive;

                curr->last_received_packet = get_jiffies_64();

                while (!skb_queue_empty(&(curr->skbs))) {
                        struct sk_buff *skb2 = skb_dequeue(&(curr->skbs));
                        kfree_skb(skb2);
                }
        } else {
                curr = kmem_cache_alloc(announce_in_slab,
                                GFP_KERNEL);
                if (curr == 0)
                        goto discard;

                skb_queue_head_init(&(curr->skbs));
                list_add_tail((struct list_head *) curr, &announce_list);
        }

        curr->packet_version = packet_version;
        curr->total_size = total_size;
        curr->received_size = 0;
        curr->announce_proto_version = announce_proto_version;
        curr->dev = skb->dev;

        memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);

found:
        if (_rcv_announce(skb, curr)) {
                list_del((struct list_head *) curr);

                kmem_cache_free(announce_in_slab, curr);
        }

        if (0) {
discard:
                kfree_skb(skb);
        }

        mutex_unlock(&(neighbor_operation_lock));
}
struct announce {
        struct ref_counter refs;

        __u32 packet_version;

        char *announce_msg;
        __u32 announce_msg_len;
};

struct announce *last_announce;
struct announce_data {
        struct delayed_work announce_work;

        struct net_device *dev;

        struct announce *ann;

        struct list_head lh;

        __u32 curr_announce_msg_offset;
        __u64 scheduled_announce_timer;
};
static void _splitsend_announce(struct announce_data *ann)
{
        struct sk_buff *skb;
        __u32 packet_size = 256;
        __u32 remainingdata = ann->ann->announce_msg_len -
                        ann->curr_announce_msg_offset;
        __u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
        __u32 overhead = 17 + headroom;
        char *header;
        char *ptr;

        if (remainingdata < packet_size)
                packet_size = remainingdata;

        skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
        if (unlikely(0 == skb))
                return;

        skb->protocol = htons(ETH_P_COR);
        skb->dev = ann->dev;
        skb_reserve(skb, overhead);

        if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
                        ann->dev->broadcast, ann->dev->dev_addr, skb->len) < 0))
                goto out_err;

        skb_reset_network_header(skb);

        header = skb_put(skb, 17);
        if (unlikely(header == 0))
                goto out_err;

        header[0] = PACKET_TYPE_ANNOUNCE;

        put_u32(header + 1, 0, 1); /* announce proto version */
        put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
        put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
        put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */

        ptr = skb_put(skb, packet_size);
        if (unlikely(ptr == 0))
                goto out_err;

        memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset,
                        packet_size);

        dev_queue_xmit(skb);

        ann->curr_announce_msg_offset += packet_size;

        if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
                ann->curr_announce_msg_offset = 0;

        return;

out_err:
        kfree_skb(skb);
}
static void splitsend_announce(struct work_struct *work)
{
        struct announce_data *ann = (struct announce_data *) work;

        mutex_lock(&(neighbor_operation_lock));

        if (unlikely(ann->dev == 0)) {
                mutex_unlock(&(neighbor_operation_lock));
                return;
        }

        if (ann->ann == 0 && last_announce == 0)
                goto out;

        if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
                if (ann->ann != 0)
                        ref_counter_decr(&(ann->ann->refs));
                ann->ann = last_announce;
                ref_counter_incr(&(ann->ann->refs));
        }

        _splitsend_announce(ann);
out:
        mutex_unlock(&(neighbor_operation_lock));

        {
                int target_delay_ms = 100;
                int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
                __u64 jiffies = get_jiffies_64();
                __s64 delay;

                ann->scheduled_announce_timer += target_delay_jiffies;

                delay = ann->scheduled_announce_timer - jiffies;
                if (delay < 0)
                        delay = 0;

                INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
                schedule_delayed_work(&(ann->announce_work), delay);
        }
}
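/*
 * Design note (assumption based on the code above, not stated in the
 * original source): the next send is scheduled relative to
 * scheduled_announce_timer rather than to the current jiffies value, so
 * queueing latency does not accumulate and announces keep their average
 * 100ms period even when individual work items run late.
 */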
static void announce_free(struct ref_counter *ref)
{
        struct announce *ann = container_of(ref, struct announce, refs);
        kfree(ann->announce_msg);
        kfree(ann);
}

static struct ref_counter_def announce_ref = {
        .free = announce_free
};
void blacklist_neighbor(struct neighbor *nb)
{
}
static struct announce_data *get_announce_by_netdev(struct net_device *dev)
{
        struct list_head *lh = announce_out_list.next;

        while (lh != &announce_out_list) {
                struct announce_data *curr = (struct announce_data *)(
                                ((char *) lh) -
                                offsetof(struct announce_data, lh));

                if (curr->dev == dev)
                        return curr;

                lh = lh->next;
        }

        return 0;
}
static void announce_sent_adddev(struct net_device *dev)
{
        struct announce_data *ann;

        ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

        if (unlikely(ann == 0)) {
                printk(KERN_ERR "cor cannot allocate memory for sending "
                                "announces\n");
                return;
        }

        memset(ann, 0, sizeof(struct announce_data));

        ann->dev = dev;
        dev_hold(dev);

        mutex_lock(&(neighbor_operation_lock));
        list_add_tail(&(ann->lh), &announce_out_list);
        mutex_unlock(&(neighbor_operation_lock));

        ann->scheduled_announce_timer = get_jiffies_64();
        INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
        schedule_delayed_work(&(ann->announce_work), 1);
}
static void announce_sent_rmdev(struct net_device *dev)
{
        struct announce_data *ann;

        mutex_lock(&(neighbor_operation_lock));

        ann = get_announce_by_netdev(dev);

        if (ann != 0) {
                /* splitsend_announce() stops rescheduling once dev is 0 */
                dev_put(ann->dev);
                ann->dev = 0;
        }

        mutex_unlock(&(neighbor_operation_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
                void *ptr)
{
        struct net_device *dev = (struct net_device *) ptr;

        switch (event) {
        case NETDEV_UP:
                announce_sent_adddev(dev);
                break;
        case NETDEV_DOWN:
                announce_sent_rmdev(dev);
                break;
        case NETDEV_REGISTER:
        case NETDEV_UNREGISTER:
        case NETDEV_CHANGEMTU:
        case NETDEV_CHANGEADDR:
        case NETDEV_GOING_DOWN:
        case NETDEV_CHANGENAME:
        case NETDEV_FEAT_CHANGE:
        case NETDEV_BONDING_FAILOVER:
                break;
        default:
                return 1;
        }

        return 0;
}
static int set_announce(char *msg, __u32 len)
{
        struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);

        if (unlikely(ann == 0)) {
                kfree(msg);
                return 1;
        }

        memset(ann, 0, sizeof(struct announce));

        ann->announce_msg = msg;
        ann->announce_msg_len = len;

        ref_counter_init(&(ann->refs), &announce_ref);

        mutex_lock(&(neighbor_operation_lock));

        if (last_announce != 0) {
                ann->packet_version = last_announce->packet_version + 1;
                ref_counter_decr(&(last_announce->refs));
        }

        last_announce = ann;

        mutex_unlock(&(neighbor_operation_lock));

        return 0;
}
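/*
 * Design note (assumption drawn from the code above): bumping
 * packet_version whenever the announce message changes lets receivers
 * drop half-merged state from an older message, since rcv_announce()
 * only matches fragments whose packet_version and total_size agree.
 */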
static int generate_announce(void)
{
        __u32 addrtypelen = strlen(addrtype);

        __u32 hdr_len = 16;
        __u32 cmd_hdr_len = 8;
        __u32 cmd_len = 2 + 2 + addrtypelen + addrlen;

        __u32 len = hdr_len + cmd_hdr_len + cmd_len;
        __u32 offset = 0;

        char *msg = kmalloc(len, GFP_KERNEL);
        if (unlikely(msg == 0))
                return 1;

        put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
        offset += 4;
        put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
        offset += 4;
        put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
        offset += 4;
        put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
        offset += 4;

        put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
        offset += 4;
        put_u32(msg + offset, cmd_len, 1); /* command length */
        offset += 4;

        /* addrtypelen, addrlen */
        put_u16(msg + offset, addrtypelen, 1);
        offset += 2;
        put_u16(msg + offset, addrlen, 1);
        offset += 2;

        /* addrtype, addr */
        memcpy(msg + offset, addrtype, addrtypelen);
        offset += addrtypelen;
        memcpy(msg + offset, addr, addrlen);
        offset += addrlen;

        BUG_ON(offset != len);

        return set_announce(msg, len);
}
int __init cor_neighbor_init(void)
{
        addrlen = 16;

        addr = kmalloc(addrlen, GFP_KERNEL);
        if (unlikely(addr == 0))
                return 1;

        get_random_bytes(addr, addrlen);

        nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
                        0, 0);
        announce_in_slab = kmem_cache_create("cor_announce_in",
                        sizeof(struct announce_in), 8, 0, 0);

        if (generate_announce())
                return 1;

        memset(&netdev_notify, 0, sizeof(netdev_notify));
        netdev_notify.notifier_call = netdev_notify_func;
        register_netdevice_notifier(&netdev_notify);

        return 0;
}
983 MODULE_LICENSE("GPL");