/*
 * Connection oriented routing
 * Copyright (C) 2007-2009 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
/*
 * Split packet data format:
 * announce proto version [4]
 *  is 0, may be increased if the format changes
 * packet version [4]
 *  starts with 0, increments every time the data field changes
 * total size [4]
 *  total data size of all merged packets
 * offset [4]
 *  used to determine the order when merging the split packets
 * cumulative checksum [8] (not yet implemented)
 *  chunk 1 contains the checksum of the data in chunk 1
 *  chunk 2 contains the checksum of the data in chunk 1+2
 */
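/*
 * Illustrative sketch only: assuming the 17-byte header that
 * _splitsend_announce() writes below (one packet type byte followed by the
 * four 32-bit fields above), the on-wire header could be described by a
 * struct like this. The struct and field names are made up for this sketch;
 * the real code reads and writes the fields with pull_u32()/put_u32() at
 * fixed offsets.
 */
#if 0
struct announce_split_packet_hdr_example {
	__u8 packet_type;		/* PACKET_TYPE_ANNOUNCE */
	__be32 announce_proto_version;	/* currently always 0 */
	__be32 packet_version;		/* bumped whenever the data changes */
	__be32 total_size;		/* size of all chunks merged together */
	__be32 offset;			/* position of this chunk's data */
} __attribute__((packed));
#endif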
/*
 * Data format of the announce packet "data" field:
 * min_announce_proto_version [4]
 * max_announce_proto_version [4]
 * min_cor_proto_version [4]
 * max_cor_proto_version [4]
 *  versions which are understood
 *
 * command [4]
 * commandlength [4]
 * commanddata [commandlength]
 */
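/*
 * Illustrative example only: a minimal announce "data" field as
 * generate_announce() below would build it, assuming the "id" address type
 * and an 8-byte address (multi-byte values big endian, address bytes shown
 * as xx):
 *
 *   00 00 00 00   min_announce_proto_version
 *   00 00 00 00   max_announce_proto_version
 *   00 00 00 00   min_cor_proto_version
 *   00 00 00 00   max_cor_proto_version
 *   00 00 00 01   command = NEIGHCMD_ADDADDR
 *   00 00 00 0e   commandlength = 2 + 2 + 2 + 8 = 14
 *   [14 bytes]    commanddata, see NEIGHCMD_ADDADDR below
 */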
#define NEIGHCMD_ADDADDR 1

/*
 * NEIGHCMD_ADDADDR:
 * addrtypelen [2]
 * addrlen [2]
 * addrtype [addrtypelen]
 * addr [addrlen]
 */
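/*
 * Illustrative example only: the commanddata for the announce sketched
 * above, again assuming the "id" address type and an 8-byte address:
 *
 *   00 02                     addrtypelen = 2
 *   00 08                     addrlen = 8
 *   69 64                     addrtype = "id"
 *   xx xx xx xx xx xx xx xx   addr (random bytes, see cor_neighbor_init())
 */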
/* "I hear you" data format: */
DEFINE_MUTEX(neighbor_operation_lock);

char *addrtype = "id";
char *addr;
__u32 addrlen;

LIST_HEAD(nb_list);
struct kmem_cache *nb_slab;

LIST_HEAD(announce_out_list);

struct notifier_block netdev_notify;
#define ADDRTYPE_UNKNOWN 0
#define ADDRTYPE_ID 1
static int get_addrtype(__u32 addrtypelen, char *addrtype)
{
	if (addrtypelen == 2 &&
			(addrtype[0] == 'i' || addrtype[0] == 'I') &&
			(addrtype[1] == 'd' || addrtype[1] == 'D'))
		return ADDRTYPE_ID;

	return ADDRTYPE_UNKNOWN;
}
void neighbor_free(struct kref *ref)
{
	struct neighbor *nb = container_of(ref, struct neighbor, ref);

	BUG_ON(nb->nb_list.next != 0 || nb->nb_list.prev != 0);

	kmem_cache_free(nb_slab, nb);
}
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);
	__u32 seqno;

	if (unlikely(nb == 0))
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	kref_init(&(nb->ref));
	mutex_init(&(nb->cmsg_lock));
	INIT_LIST_HEAD(&(nb->control_msgs_out));
	nb->last_ping_time = jiffies;
	atomic_set(&(nb->ooo_packets), 0);
	get_random_bytes((char *) &seqno, sizeof(seqno));
	mutex_init(&(nb->pingcookie_lock));
	atomic_set(&(nb->latency), 0);
	mutex_init(&(nb->state_lock));
	atomic_set(&(nb->kpacket_seqno), seqno);
	mutex_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->snd_conn_list));
	spin_lock_init(&(nb->retrans_lock));
	skb_queue_head_init(&(nb->retrans_list));

	return nb;
}
struct neighbor *get_neigh_by_mac(struct sk_buff *skb)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (get_addrtype(addrtypelen, (char *) addrtype) != ADDRTYPE_ID)
		return 0;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
				addrlen) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
__u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
{
	struct list_head *currlh;

	char *p_totalneighs = buf;
	char *p_response_rows = buf + 4;

	__u32 total = 0;
	__u32 cnt = 0;

	__u32 buf_offset = 8;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);
		__u8 state;

		/* get_neigh_state is not used here because it would deadlock */
		mutex_lock(&(curr->state_lock));
		state = curr->state;
		mutex_unlock(&(curr->state_lock));

		if (state != NEIGHBOR_STATE_ACTIVE)
			goto cont;

		total++;

		if (unlikely(buf_offset + 6 + 2 + curr->addrlen > buflen))
			goto cont;

		put_u16(buf + buf_offset, 1, 1); /* numaddr */
		buf_offset += 2;
		put_u16(buf + buf_offset, 2, 1); /* addrtypelen */
		buf_offset += 2;
		put_u16(buf + buf_offset, curr->addrlen, 1); /* addrlen */
		buf_offset += 2;
		buf[buf_offset] = 'i'; /* addrtype */
		buf_offset += 1;
		buf[buf_offset] = 'd';
		buf_offset += 1;
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	put_u32(p_totalneighs, total, 1);
	put_u32(p_response_rows, cnt, 1);

	return buf_offset;
}
void set_last_routdtrip(struct neighbor *nb, unsigned long time)
{
	mutex_lock(&(nb->state_lock));

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) && time_after(time,
			nb->state_time.last_roundtrip))
		nb->state_time.last_roundtrip = time;

	mutex_unlock(&(nb->state_lock));
}
int get_neigh_state(struct neighbor *nb)
{
	int ret;
	int switchedtostalled = 0;

	mutex_lock(&(nb->state_lock));

	if (unlikely(likely(nb->state == NEIGHBOR_STATE_ACTIVE) && unlikely(
			time_after(jiffies, nb->state_time.last_roundtrip +
			msecs_to_jiffies(STALL_START_TIME_MS))))) {
		nb->state = NEIGHBOR_STATE_STALLED;
		switchedtostalled = 1;
	}

	ret = nb->state;

	mutex_unlock(&(nb->state_lock));

	if (switchedtostalled) {
		printk(KERN_ERR "switched to stalled");
		#warning todo reset conns
	}

	return ret;
}
static struct ping_cookie *find_cookie(struct neighbor *nb, __u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &(nb->cookies[i]);
	}

	return 0;
}
void ping_resp(struct neighbor *nb, __u32 cookie, __u32 respdelay)
{
	struct ping_cookie *c;
	int i;

	__s64 newlatency;

	mutex_lock(&(nb->pingcookie_lock));

	c = find_cookie(nb, cookie);
	if (unlikely(c == 0))
		goto out;

	/*
	 * Exponentially weighted moving average: 15/16 of the old latency
	 * plus 1/16 of the newly measured roundtrip time (in microseconds).
	 */
	newlatency = ((((__s64) ((__u32)atomic_read(&(nb->latency)))) * 15 +
		jiffies_to_usecs(jiffies - c->time) - respdelay) / 16);
	if (unlikely(newlatency < 0))
		newlatency = 0;
	if (unlikely(newlatency > (((__s64)256)*256*256*256 - 1)))
		newlatency = ((__s64)256)*256*256*256 - 1;

	atomic_set(&(nb->latency), (__u32) newlatency);

	c->cookie = 0;
	nb->ping_intransit--;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie != 0 &&
				time_before(nb->cookies[i].time, c->time)) {
			nb->cookies[i].pongs++;
			if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
				nb->cookies[i].cookie = 0;
				nb->cookies[i].pongs = 0;
				nb->ping_intransit--;
			}
		}
	}

	mutex_lock(&(nb->state_lock));

	if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
			nb->state == NEIGHBOR_STATE_STALLED)) {
		nb->ping_success++;

		if (nb->state == NEIGHBOR_STATE_INITIAL) {
			__u64 jiffies64 = get_jiffies_64();
			if (nb->state_time.last_state_change == 0)
				nb->state_time.last_state_change = jiffies64;
			if (jiffies64 <= (nb->state_time.last_state_change +
					msecs_to_jiffies(INITIAL_TIME_MS)))
				goto out_statelock;
		}

		if (nb->ping_success >= PING_SUCCESS_CNT) {
			if (nb->state == NEIGHBOR_STATE_INITIAL)
				printk(KERN_ERR "switched from initial to active");
			else
				printk(KERN_ERR "switched from stalled to active");
			nb->state = NEIGHBOR_STATE_ACTIVE;
			nb->ping_success = 0;
			nb->state_time.last_roundtrip = jiffies;
		}
	}

out_statelock:
	mutex_unlock(&(nb->state_lock));

out:
	mutex_unlock(&(nb->pingcookie_lock));
}
__u32 add_ping_req(struct neighbor *nb)
{
	struct ping_cookie *c;
	__u32 i;
	__u32 cookie;

	mutex_lock(&(nb->pingcookie_lock));

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == 0)
			goto found;
	}

	get_random_bytes((char *) &i, sizeof(i));
	i = (i % (PING_COOKIES_PER_NEIGH - PING_COOKIES_FIFO)) +
			PING_COOKIES_FIFO;

found:
	c = &(nb->cookies[i]);
	c->time = jiffies;
	c->pongs = 0;

	nb->lastcookie++;
	if (unlikely(nb->lastcookie == 0))
		nb->lastcookie++;
	c->cookie = nb->lastcookie;

	nb->ping_intransit++;

	cookie = c->cookie;

	mutex_unlock(&(nb->pingcookie_lock));

	return cookie;
}
/*
 * Checks in addition to the checks and timings already done in
 * kpacket_gen.c. This is primarily to make sure that we do not invalidate
 * other ping cookies which might still receive responses. It does this by
 * requiring a certain minimum delay between pings, depending on how many
 * pings are already in transit.
 */
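/*
 * Illustrative example with assumed values (the real constants live in the
 * cor headers): with PING_COOKIES_NOTHROTTLE == 4, a smoothed latency of
 * 64000 usec and 6 pings in transit, the minimum delay computed below is
 * (64000 / 1000) << (6 + 1 - 4) = 64 << 3 = 512 ms, capped at
 * PING_THROTTLE_LIMIT_MS. Every additional ping in transit doubles the
 * required gap between pings.
 */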
int time_to_send_ping(struct neighbor *nb)
{
	int ret = 1;

	mutex_lock(&(nb->pingcookie_lock));
	if (nb->ping_intransit >= PING_COOKIES_NOTHROTTLE) {
		__u32 mindelay = (((__u32)atomic_read(&(nb->latency)))/1000) <<
				(nb->ping_intransit + 1 -
				PING_COOKIES_NOTHROTTLE);
		if (mindelay > PING_THROTTLE_LIMIT_MS)
			mindelay = PING_THROTTLE_LIMIT_MS;

		if (jiffies_to_msecs(jiffies - nb->last_ping_time) < mindelay)
			ret = 0;
	}
	mutex_unlock(&(nb->pingcookie_lock));

	return ret;
}
static void add_neighbor(struct neighbor *nb)
{
	struct list_head *currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == nb->addrlen && memcmp(curr->addr, nb->addr,
				curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}

	list_add_tail(&(nb->nb_list), &nb_list);
	schedule_controlmsg_timerfunc(nb);
	setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
			(unsigned long) nb);

	return;

already_present:
	kmem_cache_free(nb_slab, nb);
}
static __u32 pull_u32(struct sk_buff *skb, int convbo)
{
	char *ptr = cor_pull_skb(skb, 4);

	__u32 ret = 0;

	((char *)&ret)[0] = ptr[0];
	((char *)&ret)[1] = ptr[1];
	((char *)&ret)[2] = ptr[2];
	((char *)&ret)[3] = ptr[3];

	if (convbo)
		return be32_to_cpu(ret);
	return ret;
}
static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	__u16 addrtypelen;
	char *addrtype;
	__u16 addrlen;
	char *addr;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;

	addrlen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;

	addrtype = cmddata;
	cmddata += addrtypelen;

	addr = cmddata;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	nb->addr = kmalloc(addrlen, GFP_KERNEL);
	if (unlikely(nb->addr == 0))
		return 1;

	memcpy(nb->addr, addr, addrlen);
	nb->addrlen = addrlen;

	return 0;
}
static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	if (cmd == NEIGHCMD_ADDADDR) {
		apply_announce_addaddr(nb, cmd, len, cmddata);
	} else {
		/* ignore unknown cmds */
	}
}
static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	struct neighbor *nb = alloc_neighbor(GFP_KERNEL);

	if (unlikely(nb == 0))
		return;

	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		BUG_ON(cmdlen > len);

		apply_announce_cmd(nb, cmd, cmdlen, msg);

		msg += cmdlen;
		len -= cmdlen;
	}

	memcpy(nb->mac, source_hw, MAX_ADDR_LEN);

	nb->dev = dev;

	add_neighbor(nb);
}
static int check_announce_cmds(char *msg, __u32 len)
{
	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packet */
		if (cmdlen > len)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0)
		return 1;

	return 0;
}
static void parse_announce(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	__u32 min_announce_version;
	__u32 max_announce_version;
	__u32 min_cor_version;
	__u32 max_cor_version;

	if (unlikely(len < 16))
		return;

	min_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	max_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	min_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	max_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 16;

	if (min_announce_version != 0)
		return;
	if (min_cor_version != 0)
		return;
	if (check_announce_cmds(msg, len)) {
		return;
	}
	apply_announce_cmds(msg, len, dev, source_hw);
}
struct announce_in {
	/* lh has to be first */
	struct list_head lh;
	struct sk_buff_head skbs; /* sorted by offset */
	struct net_device *dev;
	char source_hw[MAX_ADDR_LEN];
	__u32 announce_proto_version;
	__u32 packet_version;
	__u32 total_size;
	__u32 received_size;
	__u64 last_received_packet;
};

LIST_HEAD(announce_list);

struct kmem_cache *announce_in_slab;
static void merge_announce(struct announce_in *ann)
{
	char *msg = kmalloc(ann->total_size, GFP_KERNEL);
	__u32 copy = 0;

	if (msg == 0) {
		/* try again when the next packet arrives */
		return;
	}

	while (copy != ann->total_size) {
		__u32 currcpy;
		struct sk_buff *skb;

		if (skb_queue_empty(&(ann->skbs))) {
			printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
				"empty while merging packets\n");
			goto free;
		}

		skb = skb_dequeue(&(ann->skbs));
		currcpy = skb->len;

		if (currcpy + copy > ann->total_size) {
			kfree_skb(skb);
			goto free;
		}

#warning todo overlapping skbs
		memcpy(msg + copy, skb->data, currcpy);
		copy += currcpy;
		kfree_skb(skb);
	}

	parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);

free:
	kfree(msg);

	list_del(&(ann->lh));
	kmem_cache_free(announce_in_slab, ann);
}
static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
{
	struct skb_procstate *ps = skb_pstate(skb);

	__u32 offset = ps->funcstate.announce.offset;
	__u32 len = skb->len;

	__u32 curroffset = 0;
	__u32 prevoffset = 0;
	__u32 prevlen = 0;

	struct sk_buff *curr = ann->skbs.next;

	if (len + offset > ann->total_size) {
		/* invalid packet */
		kfree_skb(skb);
		return 0;
	}
	/*
	 * Try to find the right place to insert in the sorted list. This
	 * means to process the list until we find an skb which has a greater
	 * offset, so we can insert before it to keep the sort order. However,
	 * this is complicated by the fact that the new skb must not be
	 * inserted between 2 skbs if there is no data missing in between. So
	 * the loop has to keep running until there is either a gap to insert
	 * into or we see that this data has already been received.
	 */
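	/*
	 * Worked example with made-up offsets: if chunks [0,256) and
	 * [512,768) are already queued and a new chunk arrives with offset
	 * 256 and len 300, the loop below inserts it before the [512,768)
	 * chunk; the trimming that follows then reduces len to 256 so that
	 * only the bytes [256,512) count as newly received.
	 */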
	while ((void *) curr != (void *) &(ann->skbs)) {
		struct skb_procstate *currps = skb_pstate(curr);

		curroffset = currps->funcstate.announce.offset;

		if (curroffset > offset && (prevoffset + prevlen) < curroffset)
			break;

		prevoffset = curroffset;
		prevlen = curr->len;
		curr = curr->next;

		if ((offset+len) <= (prevoffset+prevlen)) {
			/* we already have this data */
			kfree_skb(skb);
			return 0;
		}
	}

	/*
	 * Calculate how much data was really received, by subtracting
	 * the bytes we already have.
	 */
	if (unlikely(prevoffset + prevlen > offset)) {
		len -= (prevoffset + prevlen) - offset;
		offset = prevoffset + prevlen;
	}

	if (unlikely((void *) curr != (void *) &(ann->skbs) &&
			(offset + len) > curroffset))
		len = curroffset - offset;

	ann->received_size += len;
	BUG_ON(ann->received_size > ann->total_size);
	__skb_queue_before(&(ann->skbs), curr, skb);
	ann->last_received_packet = get_jiffies_64();

	if (ann->received_size == ann->total_size)
		merge_announce(ann);
	else if (ann->skbs.qlen >= 16)
		return 1;

	return 0;
}
void rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct announce_in *curr = 0;
	struct announce_in *leastactive = 0;
	__u32 list_size = 0;

	__u32 announce_proto_version = pull_u32(skb, 1);
	__u32 packet_version = pull_u32(skb, 1);
	__u32 total_size = pull_u32(skb, 1);

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	ps->funcstate.announce.offset = pull_u32(skb, 1);

	if (total_size > 8192)
		goto discard;

	mutex_lock(&(neighbor_operation_lock));

	if (announce_proto_version != 0)
		goto discard_unlock;

	curr = (struct announce_in *) announce_list.next;

	while (((struct list_head *) curr) != &(announce_list)) {
		list_size++;

		if (curr->dev == skb->dev &&
			memcmp(curr->source_hw, source_hw, MAX_ADDR_LEN) == 0 &&
			curr->announce_proto_version == announce_proto_version &&
			curr->packet_version == packet_version &&
			curr->total_size == total_size)
			goto found;

		if (leastactive == 0 || curr->last_received_packet <
			leastactive->last_received_packet)
			leastactive = curr;

		curr = (struct announce_in *) curr->lh.next;
	}

	if (list_size >= 128) {
		BUG_ON(leastactive == 0);
		curr = leastactive;

		curr->last_received_packet = get_jiffies_64();

		while (!skb_queue_empty(&(curr->skbs))) {
			struct sk_buff *skb2 = skb_dequeue(&(curr->skbs));
			kfree_skb(skb2);
		}
	} else {
		curr = kmem_cache_alloc(announce_in_slab,
				GFP_KERNEL);
		if (curr == 0)
			goto discard_unlock;

		skb_queue_head_init(&(curr->skbs));
		list_add_tail((struct list_head *) curr, &announce_list);
	}

	curr->packet_version = packet_version;
	curr->total_size = total_size;
	curr->received_size = 0;
	curr->announce_proto_version = announce_proto_version;
	curr->dev = skb->dev;
	memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);

found:
	if (_rcv_announce(skb, curr)) {
		list_del((struct list_head *) curr);
		kmem_cache_free(announce_in_slab, curr);
	}

	mutex_unlock(&(neighbor_operation_lock));
	return;

discard_unlock:
	mutex_unlock(&(neighbor_operation_lock));
discard:
	kfree_skb(skb);
}
struct announce {
	struct kref ref;

	__u32 packet_version;
	char *announce_msg;
	__u32 announce_msg_len;
};

struct announce *last_announce;

struct announce_data {
	struct delayed_work announce_work;

	struct list_head lh;

	struct net_device *dev;

	struct announce *ann;

	__u32 curr_announce_msg_offset;
	__u64 scheduled_announce_timer;
};
static void _splitsend_announce(struct announce_data *ann)
{
	struct sk_buff *skb;
	__u32 packet_size = 256;
	__u32 remainingdata = ann->ann->announce_msg_len -
			ann->curr_announce_msg_offset;
	__u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
	__u32 overhead = 17 + headroom;
	char *header;
	char *ptr;

	if (remainingdata < packet_size)
		packet_size = remainingdata;

	skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
	if (unlikely(0 == skb))
		return;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, headroom);

	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->dev->broadcast, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	header = skb_put(skb, 17);
	if (unlikely(header == 0))
		goto out_err;

	header[0] = PACKET_TYPE_ANNOUNCE;

	put_u32(header + 1, 0, 1); /* announce proto version */
	put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
	put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
	put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */

	ptr = skb_put(skb, packet_size);
	if (unlikely(ptr == 0))
		goto out_err;

	memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset,
			packet_size);

	dev_queue_xmit(skb);

	ann->curr_announce_msg_offset += packet_size;

	if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
		ann->curr_announce_msg_offset = 0;

	return;

out_err:
	kfree_skb(skb);
}
static void announce_free(struct kref *ref)
{
	struct announce *ann = container_of(ref, struct announce, ref);
	kfree(ann->announce_msg);
	kfree(ann);
}
static void splitsend_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	int target_delay_ms = 500;
	int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
	__u64 jiffies64;
	long delay;

	mutex_lock(&(neighbor_operation_lock));

	if (ann->ann == 0 && last_announce == 0)
		goto out;

	if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
		if (ann->ann != 0)
			kref_put(&(ann->ann->ref), announce_free);
		ann->ann = last_announce;
		kref_get(&(ann->ann->ref));
	}

	_splitsend_announce(ann);

out:
	mutex_unlock(&(neighbor_operation_lock));

	jiffies64 = get_jiffies_64();

	ann->scheduled_announce_timer += target_delay_jiffies;

	delay = ann->scheduled_announce_timer - jiffies64;
	if (delay < 0)
		delay = 0;

	INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
	schedule_delayed_work(&(ann->announce_work), delay);
}
static struct announce_data *get_announce_by_netdev(struct net_device *dev)
{
	struct list_head *lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct announce_data *curr = (struct announce_data *)(
				((char *) lh) -
				offsetof(struct announce_data, lh));

		if (curr->dev == dev)
			return curr;

		lh = lh->next;
	}

	return 0;
}
static void announce_sent_adddev(struct net_device *dev)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (unlikely(ann == 0)) {
		printk(KERN_ERR "cor cannot allocate memory for sending "
				"announces\n");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	ann->dev = dev;

	mutex_lock(&(neighbor_operation_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	mutex_unlock(&(neighbor_operation_lock));

	ann->scheduled_announce_timer = get_jiffies_64();
	INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
	schedule_delayed_work(&(ann->announce_work), 1);
}
static void announce_sent_rmdev(struct net_device *dev)
{
	struct announce_data *ann;

	mutex_lock(&(neighbor_operation_lock));

	ann = get_announce_by_netdev(dev);

	if (ann == 0)
		goto out;

	ann->dev = 0;

out:
	mutex_unlock(&(neighbor_operation_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;

	switch (event) {
	case NETDEV_UP:
		announce_sent_adddev(dev);
		break;
	case NETDEV_DOWN:
		announce_sent_rmdev(dev);
		break;
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}
static int set_announce(char *msg, __u32 len)
{
	struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);

	if (unlikely(ann == 0)) {
		kfree(msg);
		return 1;
	}

	memset(ann, 0, sizeof(struct announce));

	ann->announce_msg = msg;
	ann->announce_msg_len = len;

	kref_init(&(ann->ref));

	mutex_lock(&(neighbor_operation_lock));

	if (last_announce != 0) {
		ann->packet_version = last_announce->packet_version + 1;
		kref_put(&(last_announce->ref), announce_free);
	}

	last_announce = ann;

	mutex_unlock(&(neighbor_operation_lock));

	return 0;
}
static int generate_announce(void)
{
	__u32 addrtypelen = strlen(addrtype);

	__u32 hdr_len = 16;
	__u32 cmd_hdr_len = 8;
	__u32 cmd_len = 2 + 2 + addrtypelen + addrlen;

	__u32 len = hdr_len + cmd_hdr_len + cmd_len;
	__u32 offset = 0;

	char *msg = kmalloc(len, GFP_KERNEL);
	if (unlikely(msg == 0))
		return 1;

	put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
	offset += 4;

	put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
	offset += 4;
	put_u32(msg + offset, cmd_len, 1); /* command length */
	offset += 4;

	/* addrtypelen, addrlen */
	put_u16(msg + offset, addrtypelen, 1);
	offset += 2;
	put_u16(msg + offset, addrlen, 1);
	offset += 2;

	/* addrtype, addr */
	memcpy(msg + offset, addrtype, addrtypelen);
	offset += addrtypelen;
	memcpy(msg + offset, addr, addrlen);
	offset += addrlen;

	BUG_ON(offset != len);

	return set_announce(msg, len);
}
1202 int __init
cor_neighbor_init(void)
1206 addr
= kmalloc(addrlen
, GFP_KERNEL
);
1210 get_random_bytes(addr
, addrlen
);
1212 nb_slab
= kmem_cache_create("cor_neighbor", sizeof(struct neighbor
), 8,
1214 announce_in_slab
= kmem_cache_create("cor_announce_in",
1215 sizeof(struct announce_in
), 8, 0, 0);
1217 if (generate_announce())
1220 memset(&netdev_notify
, 0, sizeof(netdev_notify
));
1221 netdev_notify
.notifier_call
= netdev_notify_func
;
1222 register_netdevice_notifier(&netdev_notify
);
1233 MODULE_LICENSE("GPL");