/*
 * Connection oriented routing
 * Copyright (C) 2007-2009 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
/*
 * Split packet data format:
 * announce proto version [4]
 *  is 0, may be increased if the format changes
 * packet version [4]
 *  starts with 0, increments every time the data field changes
 * total size [4]
 *  total data size of all merged packets
 * offset [4]
 *  used to determine the order when merging the split packet
 * data [...]
 *  cumulative checksum [8] (not yet)
 *   chunk 1 contains the checksum of the data in chunk 1
 *   chunk 2 contains the checksum of the data in chunk 1+2
 *   ...
 *
 * Data format of the announce packet "data" field:
 *  min_announce_proto_version [4]
 *  max_announce_proto_version [4]
 *  min_cor_proto_version [4]
 *  max_cor_proto_version [4]
 *   versions which are understood
 *
 *  command [4]
 *  commandlength [4]
 *  commanddata [commandlength]
 */
#define NEIGHCMD_ADDADDR 1
/* NEIGHCMD_ADDADDR
 * addrtypelen [2]
 * addrlen [2]
 * addrtype [addrtypelen]
 * addr [addrlen]
 */
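/*
 * Illustration added for clarity (not part of the original source): a
 * sketch of how the 17-byte split-packet header described above is laid
 * out, using this file's own put_u32 helper. build_announce_header is a
 * hypothetical name and the snippet is compiled out.
 */
#if 0
static void build_announce_header(char *buf, __u32 packet_version,
		__u32 total_size, __u32 offset)
{
	buf[0] = PACKET_TYPE_ANNOUNCE;		/* 1 byte packet type */
	put_u32(buf + 1, 0, 1);		/* announce proto version, always 0 */
	put_u32(buf + 5, packet_version, 1); /* bumped when the data changes */
	put_u32(buf + 9, total_size, 1);	/* size of the unsplit message */
	put_u32(buf + 13, offset, 1);	/* position of this chunk's data */
}
#endif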
DEFINE_MUTEX(neighbor_operation_lock);

char *addrtype = "id";
char *addr;
int addrlen;

LIST_HEAD(nb_list);
struct kmem_cache *nb_slab;

LIST_HEAD(announce_out_list);

struct notifier_block netdev_notify;
#define ADDRTYPE_UNKNOWN 0
#define ADDRTYPE_ID 1

static int get_addrtype(__u32 addrtypelen, char *addrtype)
{
	if (addrtypelen == 2 &&
			(addrtype[0] == 'i' || addrtype[0] == 'I') &&
			(addrtype[1] == 'd' || addrtype[1] == 'D'))
		return ADDRTYPE_ID;

	return ADDRTYPE_UNKNOWN;
}
void neighbor_free(struct kref *ref)
{
	struct neighbor *nb = container_of(ref, struct neighbor, ref);
	printk(KERN_ERR "neighbor free");
	BUG_ON(nb->nb_list.next != LIST_POISON1);
	BUG_ON(nb->nb_list.prev != LIST_POISON2);
	if (nb->addr != 0)
		kfree(nb->addr);
	nb->addr = 0;
	kmem_cache_free(nb_slab, nb);
}
static struct neighbor *alloc_neighbor(gfp_t allocflags)
{
	struct neighbor *nb = kmem_cache_alloc(nb_slab, allocflags);
	__u32 seqno;

	if (unlikely(nb == 0))
		return 0;

	memset(nb, 0, sizeof(struct neighbor));

	kref_init(&(nb->ref));
	mutex_init(&(nb->cmsg_lock));
	INIT_LIST_HEAD(&(nb->control_msgs_out));
	INIT_LIST_HEAD(&(nb->ucontrol_msgs_out));
	nb->last_ping_time = jiffies;
	atomic_set(&(nb->ooo_packets), 0);
	get_random_bytes((char *) &seqno, sizeof(seqno));
	mutex_init(&(nb->pingcookie_lock));
	atomic_set(&(nb->latency), 0);
	spin_lock_init(&(nb->state_lock));
	atomic_set(&(nb->kpacket_seqno), seqno);
	mutex_init(&(nb->conn_list_lock));
	INIT_LIST_HEAD(&(nb->rcv_conn_list));
	INIT_LIST_HEAD(&(nb->snd_conn_list));
	spin_lock_init(&(nb->retrans_lock));
	skb_queue_head_init(&(nb->retrans_list));

	return nb;
}
struct neighbor *get_neigh_by_mac(struct sk_buff *skb)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (memcmp(curr->mac, source_hw, MAX_ADDR_LEN) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
struct neighbor *find_neigh(__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	struct list_head *currlh;
	struct neighbor *ret = 0;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == addrlen && memcmp(curr->addr, addr,
				addrlen) == 0) {
			ret = curr;
			kref_get(&(ret->ref));
			break;
		}

		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	return ret;
}
__u32 generate_neigh_list(char *buf, __u32 buflen, __u32 limit, __u32 offset)
{
	struct list_head *currlh;

	char *p_totalneighs = buf;
	char *p_response_rows = buf + 4;

	__u32 total = 0;
	__u32 cnt = 0;

	__u32 buf_offset = 8;

	mutex_lock(&(neighbor_operation_lock));

	currlh = nb_list.next;

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		__u8 state;
		unsigned long iflags;
		/* get_neigh_state not used here because it would deadlock */
		spin_lock_irqsave(&(curr->state_lock), iflags);
		state = curr->state;
		spin_unlock_irqrestore(&(curr->state_lock), iflags);

		if (state != NEIGHBOR_STATE_ACTIVE)
			goto cont;

		total++;

		if (total <= offset || cnt >= limit)
			goto cont;

		if (unlikely(buflen < buf_offset + 6 + 2 + curr->addrlen))
			goto cont;

		put_u16(buf + buf_offset, 1, 1); /* numaddr */
		buf_offset += 2;
		put_u16(buf + buf_offset, 2, 1); /* addrtypelen */
		buf_offset += 2;
		put_u16(buf + buf_offset, curr->addrlen, 1); /* addrlen */
		buf_offset += 2;
		buf[buf_offset] = 'i'; /* addrtype */
		buf_offset += 1;
		buf[buf_offset] = 'd';
		buf_offset += 1;
		memcpy(buf + buf_offset, curr->addr, curr->addrlen); /* addr */
		buf_offset += curr->addrlen;

		BUG_ON(buf_offset > buflen);

		cnt++;

cont:
		currlh = currlh->next;
	}

	mutex_unlock(&(neighbor_operation_lock));

	put_u32(p_totalneighs, total, 1);
	put_u32(p_response_rows, cnt, 1);

	return buf_offset;
}
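/*
 * Illustration (added): the buffer generated above starts with two
 * big-endian __u32 fields, the total number of active neighbors and the
 * number of rows actually written, followed by one row per reported
 * neighbor:
 *  numaddr=1 [2] | addrtypelen=2 [2] | addrlen [2] | "id" | addr [addrlen]
 * With one neighbor and a 16 byte address this is 8 + 24 = 32 bytes.
 */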
void set_last_routdtrip(struct neighbor *nb, unsigned long time)
{
	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (likely(nb->state == NEIGHBOR_STATE_ACTIVE) && time_after(time,
			nb->state_time.last_roundtrip))
		nb->state_time.last_roundtrip = time;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);
}
static void reset_stall_conns(struct neighbor *nb,
		int stall_time_ms, int resetall)
{
	struct list_head *currlh;

restart:
	mutex_lock(&(nb->conn_list_lock));
	currlh = nb->snd_conn_list.next;

	while (currlh != &(nb->snd_conn_list)) {
		struct conn *rconn = container_of(currlh, struct conn,
				target.out.nb_list);

		BUG_ON(rconn->targettype != TARGET_OUT);

		if (resetall || stall_time_ms >=
				rconn->target.out.stall_timeout_ms) {
			/*
			 * reset_conn must not be called with conn_list_lock
			 * held
			 */
			mutex_unlock(&(nb->conn_list_lock));
			reset_conn(rconn);
			goto restart;
		}
		currlh = currlh->next;
	}
	mutex_unlock(&(nb->conn_list_lock));
}
static void stall_timerfunc(struct work_struct *work);

static void stall_timer(struct neighbor *nb, int fromtimer)
{
	int stall_time_ms;
	__u8 nbstate;
	int resetall;

	unsigned long iflags;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	stall_time_ms = jiffies_to_msecs(jiffies -
			nb->state_time.last_roundtrip);
	nbstate = nb->state;

	if (unlikely(nbstate != NEIGHBOR_STATE_STALLED))
		nb->str_timer_pending = 0;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (unlikely(nbstate != NEIGHBOR_STATE_STALLED)) {
		kref_put(&(nb->ref), neighbor_free);
		return;
	}

	resetall = (stall_time_ms > NB_KILL_TIME_MS);

	/*if (resetall)
		printk(KERN_ERR "reset_all");*/

	reset_stall_conns(nb, stall_time_ms, resetall);

	if (resetall) {
		spin_lock_irqsave(&(nb->state_lock), iflags);
		nb->state = NEIGHBOR_STATE_KILLED;
		spin_unlock_irqrestore(&(nb->state_lock), iflags);

		list_del(&(nb->nb_list));
		kref_put(&(nb->ref), neighbor_free); /* nb_list */

		kref_put(&(nb->ref), neighbor_free); /* stall_timer */
	} else {
		if (fromtimer == 0) {
			int pending;

			spin_lock_irqsave(&(nb->state_lock), iflags);
			pending = nb->str_timer_pending;
			nb->str_timer_pending = 1;
			spin_unlock_irqrestore(&(nb->state_lock), iflags);

			if (pending)
				return;

			kref_get(&(nb->ref));
		}

		INIT_DELAYED_WORK(&(nb->stalltimeout_timer), stall_timerfunc);
		schedule_delayed_work(&(nb->stalltimeout_timer),
				msecs_to_jiffies(STALL_TIMER_INTERVAL_MS));
	}
}

static void stall_timerfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, stalltimeout_timer);
	stall_timer(nb, 1);
}
int get_neigh_state(struct neighbor *nb)
{
	int ret;
	int switchedtostalled = 0;
	unsigned long iflags;

	BUG_ON(nb == 0);

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (unlikely(likely(nb->state == NEIGHBOR_STATE_ACTIVE) && unlikely(
			time_after_eq(jiffies, nb->state_time.last_roundtrip +
			msecs_to_jiffies(NB_STALL_TIME_MS))))) {
		nb->state = NEIGHBOR_STATE_STALLED;
		switchedtostalled = 1;
	}

	ret = nb->state;

	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	if (switchedtostalled) {
		/*printk(KERN_ERR "switched to stalled");*/
		stall_timer(nb, 0);
	}

	return ret;
}
static struct ping_cookie *find_cookie(struct neighbor *nb, __u32 cookie)
{
	int i;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == cookie)
			return &(nb->cookies[i]);
	}

	return 0;
}
void ping_resp(struct neighbor *nb, __u32 cookie, __u32 respdelay)
{
	struct ping_cookie *c;
	int i;

	unsigned long cookie_sendtime;
	__s64 newlatency;

	unsigned long iflags;

	mutex_lock(&(nb->pingcookie_lock));

	c = find_cookie(nb, cookie);

	if (unlikely(c == 0))
		goto out;

	cookie_sendtime = c->time;

	newlatency = ((((__s64) ((__u32)atomic_read(&(nb->latency)))) * 15 +
			jiffies_to_usecs(jiffies - c->time) - respdelay) / 16);
	if (unlikely(newlatency < 0))
		newlatency = 0;
	if (unlikely(newlatency > (((__s64)256)*256*256*256 - 1)))
		newlatency = ((__s64)256)*256*256*256 - 1;

	atomic_set(&(nb->latency), (__u32) newlatency);

	c->cookie = 0;
	nb->ping_intransit--;

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie != 0 &&
				time_before(nb->cookies[i].time, c->time)) {
			nb->cookies[i].pongs++;
			if (nb->cookies[i].pongs >= PING_PONGLIMIT) {
				nb->cookies[i].cookie = 0;
				nb->cookies[i].pongs = 0;
				nb->ping_intransit--;
			}
		}
	}

	spin_lock_irqsave(&(nb->state_lock), iflags);

	if (unlikely(nb->state == NEIGHBOR_STATE_INITIAL ||
			nb->state == NEIGHBOR_STATE_STALLED)) {
		nb->ping_success++;

		if (nb->state == NEIGHBOR_STATE_INITIAL) {
			__u64 jiffies64 = get_jiffies_64();
			if (nb->state_time.last_state_change == 0)
				nb->state_time.last_state_change = jiffies64;
			if (jiffies64 <= (nb->state_time.last_state_change +
					msecs_to_jiffies(INITIAL_TIME_MS)))
				goto out_unlock;
		}

		if (nb->ping_success >= PING_SUCCESS_CNT) {
			/*if (nb->state == NEIGHBOR_STATE_INITIAL)
				printk(KERN_ERR "switched from initial to active");
			else
				printk(KERN_ERR "switched from stalled to active");*/
			nb->state = NEIGHBOR_STATE_ACTIVE;
			nb->ping_success = 0;
			nb->state_time.last_roundtrip = jiffies;
		}
	} else {
		nb->state_time.last_roundtrip = cookie_sendtime;
	}

out_unlock:
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

out:
	mutex_unlock(&(nb->pingcookie_lock));
}
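/*
 * Note (added): the latency update above is an exponentially weighted
 * moving average, latency' = (15 * latency + sample) / 16, with the
 * sample measured in microseconds, i.e. a smoothing factor of 1/16,
 * clamped to [0, 2^32 - 1] so the result fits the atomic_t.
 */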
__u32 add_ping_req(struct neighbor *nb)
{
	struct ping_cookie *c;
	__u32 i;

	__u32 cookie;

	mutex_lock(&(nb->pingcookie_lock));

	for (i = 0; i < PING_COOKIES_PER_NEIGH; i++) {
		if (nb->cookies[i].cookie == 0)
			goto found;
	}

	get_random_bytes((char *) &i, sizeof(i));
	i = (i % (PING_COOKIES_PER_NEIGH - PING_COOKIES_FIFO)) +
			PING_COOKIES_FIFO;

found:
	c = &(nb->cookies[i]);
	c->time = jiffies;
	c->pongs = 0;
	nb->lastcookie++;
	if (unlikely(nb->lastcookie == 0))
		nb->lastcookie++;
	c->cookie = nb->lastcookie;

	nb->ping_intransit++;

	cookie = c->cookie;

	mutex_unlock(&(nb->pingcookie_lock));

	return cookie;
}
/*
 * Checks in addition to the checks and timings already done in
 * kpacket_gen.c. This is primarily to make sure that we do not invalidate
 * other ping cookies which might still receive responses. It does this by
 * requiring a certain minimum delay between pings, depending on how many
 * pings are already in transit.
 */
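/*
 * Worked example (added): with a measured latency of 100000us and
 * 4 pings in transit while PING_COOKIES_NOTHROTTLE is, say, 2,
 * mindelay = (100000/1000) << (4 + 1 - 2) = 100ms * 8 = 800ms,
 * capped at PING_THROTTLE_LIMIT_MS.
 */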
int time_to_send_ping(struct neighbor *nb)
{
	int rc = 1;

	mutex_lock(&(nb->pingcookie_lock));
	if (nb->ping_intransit >= PING_COOKIES_NOTHROTTLE) {
		__u32 mindelay = (((__u32)atomic_read(&(nb->latency)))/1000) <<
				(nb->ping_intransit + 1 -
				PING_COOKIES_NOTHROTTLE);
		if (mindelay > PING_THROTTLE_LIMIT_MS)
			mindelay = PING_THROTTLE_LIMIT_MS;

		if (jiffies_to_msecs(jiffies - nb->last_ping_time) < mindelay)
			rc = 0;
	}
	mutex_unlock(&(nb->pingcookie_lock));

	return rc;
}
static void add_neighbor(struct neighbor *nb)
{
	struct list_head *currlh = nb_list.next;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	while (currlh != &nb_list) {
		struct neighbor *curr = container_of(currlh, struct neighbor,
				nb_list);

		if (curr->addrlen == nb->addrlen && memcmp(curr->addr,
				nb->addr, curr->addrlen) == 0)
			goto already_present;

		currlh = currlh->next;
	}
	/* kref_get not needed here, because the caller leaves its ref to us */
	printk(KERN_ERR "add_neigh");
	list_add_tail(&(nb->nb_list), &nb_list);
	schedule_controlmsg_timerfunc(nb);
	setup_timer(&(nb->retrans_timer), retransmit_timerfunc,
			(unsigned long) nb);

	return;

already_present:
	kmem_cache_free(nb_slab, nb);
}
static __u32 pull_u32(struct sk_buff *skb, int convbo)
{
	char *ptr = cor_pull_skb(skb, 4);

	__u32 ret = 0;

	BUG_ON(0 == ptr);

	((char *)&ret)[0] = ptr[0];
	((char *)&ret)[1] = ptr[1];
	((char *)&ret)[2] = ptr[2];
	((char *)&ret)[3] = ptr[3];

	if (convbo)
		return be32_to_cpu(ret);
	return ret;
}
static int apply_announce_addaddr(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	__u16 addrtypelen;
	char *addrtype;
	__u16 addrlen;
	char *addr;

	BUG_ON((nb->addr == 0) != (nb->addrlen == 0));

	if (len < 4)
		return 0;

	addrtypelen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	addrlen = be16_to_cpu(*((__u16 *) cmddata));
	cmddata += 2;
	len -= 2;

	addrtype = cmddata;
	cmddata += addrtypelen;
	len -= addrtypelen;

	if (len < addrlen)
		return 0;

	addr = cmddata;

	if (get_addrtype(addrtypelen, addrtype) != ADDRTYPE_ID)
		return 0;

	nb->addr = kmalloc(addrlen, GFP_KERNEL);
	if (unlikely(nb->addr == 0))
		return 1;

	memcpy(nb->addr, addr, addrlen);
	nb->addrlen = addrlen;

	return 0;
}
static void apply_announce_cmd(struct neighbor *nb, __u32 cmd, __u32 len,
		char *cmddata)
{
	if (cmd == NEIGHCMD_ADDADDR) {
		apply_announce_addaddr(nb, cmd, len, cmddata);
	} else {
		/* ignore unknown cmds */
	}
}
static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	struct neighbor *nb = alloc_neighbor(GFP_KERNEL);

	if (unlikely(nb == 0))
		return;

	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		BUG_ON(cmdlen > len);

		apply_announce_cmd(nb, cmd, cmdlen, msg);

		msg += cmdlen;
		len -= cmdlen;
	}

	BUG_ON(len != 0);

	memcpy(nb->mac, source_hw, MAX_ADDR_LEN);

	dev_hold(dev);
	nb->dev = dev;
	add_neighbor(nb);
}
static int check_announce_cmds(char *msg, __u32 len)
{
	while (len >= 8) {
		__u32 cmd;
		__u32 cmdlen;

		cmd = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;
		cmdlen = be32_to_cpu(*((__u32 *) msg));
		msg += 4;
		len -= 4;

		/* malformed packet */
		if (cmdlen > len)
			return 1;

		msg += cmdlen;
		len -= cmdlen;
	}

	if (len != 0)
		return 1;

	return 0;
}
static void parse_announce(char *msg, __u32 len, struct net_device *dev,
		char *source_hw)
{
	__u32 min_announce_version;
	__u32 max_announce_version;
	__u32 min_cor_version;
	__u32 max_cor_version;

	if (unlikely(len < 16))
		return;

	min_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	max_announce_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	min_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	max_cor_version = be32_to_cpu(*((__u32 *) msg));
	msg += 4;
	len -= 16;

	if (min_announce_version != 0)
		return;
	if (min_cor_version != 0)
		return;
	if (check_announce_cmds(msg, len)) {
		return;
	}
	apply_announce_cmds(msg, len, dev, source_hw);
}
struct announce_in {
	/* lh has to be first */
	struct list_head lh;
	struct sk_buff_head skbs; /* sorted by offset */
	struct net_device *dev;
	char source_hw[MAX_ADDR_LEN];
	__u32 announce_proto_version;
	__u32 packet_version;
	__u32 total_size;
	__u32 received_size;
	__u64 last_received_packet;
};

LIST_HEAD(announce_list);

struct kmem_cache *announce_in_slab;
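/*
 * Note (added for clarity): each announce_in above tracks one in-progress
 * reassembly, keyed by (dev, source_hw, announce_proto_version,
 * packet_version, total_size). Incoming chunks are kept in "skbs" sorted
 * by offset and merged into one buffer once received_size reaches
 * total_size; announce_list holds all reassemblies in progress.
 */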
static void merge_announce(struct announce_in *ann)
{
	char *msg = kmalloc(ann->total_size, GFP_KERNEL);
	__u32 copy = 0;

	if (msg == 0) {
		/* try again when next packet arrives */
		return;
	}

	while (copy != ann->total_size) {
		__u32 currcpy = 0;
		__u32 offset = 0;
		struct sk_buff *skb;
		struct skb_procstate *ps;

		if (skb_queue_empty(&(ann->skbs))) {
			printk(KERN_ERR "net/cor/neighbor.c: sk_head ran "
					"empty while merging packets\n");
			goto free;
		}

		skb = skb_dequeue(&(ann->skbs));
		ps = skb_pstate(skb);

		currcpy = skb->len;

		if (unlikely(ps->funcstate.announce.offset > copy)) {
			printk(KERN_ERR "net/cor/neighbor.c: invalid offset"
					" value found\n");
			kfree_skb(skb);
			goto free;
		}

		if (unlikely(ps->funcstate.announce.offset < copy)) {
			/* the skb overlaps data we have already copied */
			offset = copy - ps->funcstate.announce.offset;
			currcpy -= offset;
		}

		if (currcpy + copy > ann->total_size) {
			kfree_skb(skb);
			goto free;
		}

		memcpy(msg + copy, skb->data + offset, currcpy);
		copy += currcpy;
		kfree_skb(skb);
	}

	parse_announce(msg, ann->total_size, ann->dev, ann->source_hw);

free:
	kfree(msg);

	dev_put(ann->dev);
	list_del(&(ann->lh));
	kmem_cache_free(announce_in_slab, ann);
}
static int _rcv_announce(struct sk_buff *skb, struct announce_in *ann)
{
	struct skb_procstate *ps = skb_pstate(skb);

	__u32 offset = ps->funcstate.announce.offset;
	__u32 len = skb->len;

	__u32 curroffset = 0;
	__u32 prevoffset = 0;
	__u32 prevlen = 0;

	struct sk_buff *curr = ann->skbs.next;

	if (len + offset > ann->total_size) {
		/* invalid header */
		kfree_skb(skb);
		return 0;
	}

	/*
	 * Try to find the right place to insert in the sorted list. This
	 * means to process the list until we find a skb which has a greater
	 * offset, so we can insert before it to keep the sort order. However,
	 * this is complicated by the fact that the new skb must not be
	 * inserted between 2 skbs if there is no data missing in between. So
	 * the loop has to keep running until there is either a gap to insert
	 * into or we see that this data has already been received.
	 */
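	/*
	 * Illustration (added): with chunks [0..256) and [512..600) already
	 * queued, a new chunk with offset 256 and length 256 exactly fills
	 * the gap and is inserted between the two; a retransmitted chunk
	 * with offset 0 is detected as "we already have this data" and
	 * freed instead.
	 */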
	while ((void *) curr != (void *) &(ann->skbs)) {
		struct skb_procstate *currps = skb_pstate(curr);

		curroffset = currps->funcstate.announce.offset;

		if (curroffset > offset && (prevoffset + prevlen) < curroffset)
			break;

		prevoffset = curroffset;
		prevlen = curr->len;
		curr = curr->next;

		if ((offset + len) <= (prevoffset + prevlen)) {
			/* we already have this data */
			kfree_skb(skb);
			return 0;
		}
	}

	/*
	 * Calculate how much data was really received, by subtracting
	 * the bytes we already have.
	 */
	if (unlikely(prevoffset + prevlen > offset)) {
		len -= (prevoffset + prevlen) - offset;
		offset = prevoffset + prevlen;
	}

	if (unlikely((void *) curr != (void *) &(ann->skbs) &&
			(offset + len) > curroffset))
		len = curroffset - offset;

	ann->received_size += len;
	BUG_ON(ann->received_size > ann->total_size);
	__skb_queue_before(&(ann->skbs), curr, skb);
	ann->last_received_packet = get_jiffies_64();

	if (ann->received_size == ann->total_size)
		merge_announce(ann);
	else if (ann->skbs.qlen >= 16)
		return 1;

	return 0;
}
void rcv_announce(struct sk_buff *skb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	struct announce_in *curr = 0;
	struct announce_in *leastactive = 0;
	__u32 list_size = 0;

	__u32 announce_proto_version = pull_u32(skb, 1);
	__u32 packet_version = pull_u32(skb, 1);
	__u32 total_size = pull_u32(skb, 1);

	char source_hw[MAX_ADDR_LEN];
	memset(source_hw, 0, MAX_ADDR_LEN);
	if (skb->dev->header_ops != 0 &&
			skb->dev->header_ops->parse != 0)
		skb->dev->header_ops->parse(skb, source_hw);

	ps->funcstate.announce.offset = pull_u32(skb, 1);

	if (total_size > 8192)
		goto discard;

	mutex_lock(&(neighbor_operation_lock));

	if (announce_proto_version != 0)
		goto discard_unlock;

	curr = (struct announce_in *) announce_list.next;

	while (((struct list_head *) curr) != &(announce_list)) {
		list_size++;

		if (curr->dev == skb->dev &&
				memcmp(curr->source_hw, source_hw,
				MAX_ADDR_LEN) == 0 &&
				curr->announce_proto_version ==
				announce_proto_version &&
				curr->packet_version == packet_version &&
				curr->total_size == total_size)
			goto found;

		if (leastactive == 0 || curr->last_received_packet <
				leastactive->last_received_packet)
			leastactive = curr;

		curr = (struct announce_in *) curr->lh.next;
	}

	if (list_size >= 128) {
		BUG_ON(leastactive == 0);
		curr = leastactive;

		curr->last_received_packet = get_jiffies_64();

		while (!skb_queue_empty(&(curr->skbs))) {
			struct sk_buff *skb2 = skb_dequeue(&(curr->skbs));
			kfree_skb(skb2);
		}

		dev_put(curr->dev);
	} else {
		curr = kmem_cache_alloc(announce_in_slab,
				GFP_KERNEL);
		if (curr == 0)
			goto discard_unlock;

		skb_queue_head_init(&(curr->skbs));
		list_add_tail((struct list_head *) curr, &announce_list);
	}

	curr->packet_version = packet_version;
	curr->total_size = total_size;
	curr->received_size = 0;
	curr->announce_proto_version = announce_proto_version;
	curr->dev = skb->dev;
	dev_hold(curr->dev);
	memcpy(curr->source_hw, source_hw, MAX_ADDR_LEN);

found:
	if (_rcv_announce(skb, curr)) {
		list_del((struct list_head *) curr);
		dev_put(curr->dev);
		kmem_cache_free(announce_in_slab, curr);
	}

	mutex_unlock(&(neighbor_operation_lock));
	return;

discard_unlock:
	mutex_unlock(&(neighbor_operation_lock));
discard:
	kfree_skb(skb);
}
struct announce {
	struct kref ref;

	__u32 packet_version;
	char *announce_msg;
	__u32 announce_msg_len;
};

struct announce *last_announce;

struct announce_data {
	struct delayed_work announce_work;

	struct net_device *dev;

	struct announce *ann;

	struct list_head lh;

	__u32 curr_announce_msg_offset;

	/* timestamp of the next scheduled announce, in 64 bit jiffies */
	__u64 scheduled_announce_timer;
};
static void _splitsend_announce(struct announce_data *ann)
{
	struct sk_buff *skb;
	__u32 packet_size = 256;
	__u32 remainingdata = ann->ann->announce_msg_len -
			ann->curr_announce_msg_offset;
	__u32 headroom = LL_ALLOCATED_SPACE(ann->dev);
	__u32 overhead = 17 + headroom;
	char *header;
	char *ptr;

	if (remainingdata < packet_size)
		packet_size = remainingdata;

	skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
	if (unlikely(0 == skb))
		return;

	skb->protocol = htons(ETH_P_COR);
	skb->dev = ann->dev;
	skb_reserve(skb, headroom);

	if (unlikely(dev_hard_header(skb, ann->dev, ETH_P_COR,
			ann->dev->broadcast, ann->dev->dev_addr, skb->len) < 0))
		goto out_err;

	skb_reset_network_header(skb);

	header = skb_put(skb, 17);
	if (unlikely(header == 0))
		goto out_err;

	header[0] = PACKET_TYPE_ANNOUNCE;

	put_u32(header + 1, 0, 1); /* announce proto version */
	put_u32(header + 5, ann->ann->packet_version, 1); /* packet version */
	put_u32(header + 9, ann->ann->announce_msg_len, 1); /* total size */
	put_u32(header + 13, ann->curr_announce_msg_offset, 1); /* offset */

	ptr = skb_put(skb, packet_size);
	if (unlikely(ptr == 0))
		goto out_err;

	memcpy(ptr, ann->ann->announce_msg + ann->curr_announce_msg_offset,
			packet_size);
	dev_queue_xmit(skb);

	ann->curr_announce_msg_offset += packet_size;

	if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
		ann->curr_announce_msg_offset = 0;

	return;

out_err:
	kfree_skb(skb);
}
static void announce_free(struct kref *ref)
{
	struct announce *ann = container_of(ref, struct announce, ref);
	kfree(ann->announce_msg);
	kfree(ann);
}
static void splitsend_announce(struct work_struct *work)
{
	struct announce_data *ann = container_of(to_delayed_work(work),
			struct announce_data, announce_work);
	int reschedule = 0;

	mutex_lock(&(neighbor_operation_lock));

	if (unlikely(ann->dev == 0))
		goto out;

	reschedule = 1;

	if (ann->ann == 0 && last_announce == 0)
		goto out;

	if (ann->curr_announce_msg_offset == 0 && ann->ann != last_announce) {
		if (ann->ann != 0)
			kref_put(&(ann->ann->ref), announce_free);
		ann->ann = last_announce;
		kref_get(&(ann->ann->ref));
	}

	_splitsend_announce(ann);
out:
	mutex_unlock(&(neighbor_operation_lock));

	if (reschedule) {
		int target_delay_ms = 500;
		int target_delay_jiffies = msecs_to_jiffies(target_delay_ms);
		__u64 jiffies = get_jiffies_64();
		int delay;

		ann->scheduled_announce_timer += target_delay_jiffies;

		delay = ann->scheduled_announce_timer - jiffies;
		if (delay < 0)
			delay = 0;

		INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
		schedule_delayed_work(&(ann->announce_work), delay);
	}
}
static struct announce_data *get_announce_by_netdev(struct net_device *dev)
{
	struct list_head *lh = announce_out_list.next;

	while (lh != &announce_out_list) {
		struct announce_data *curr = (struct announce_data *)(
				((char *) lh) -
				offsetof(struct announce_data, lh));

		if (curr->dev == dev)
			return curr;

		lh = lh->next;
	}

	return 0;
}
static void announce_sent_adddev(struct net_device *dev)
{
	struct announce_data *ann;

	ann = kmalloc(sizeof(struct announce_data), GFP_KERNEL);

	if (unlikely(ann == 0)) {
		printk(KERN_ERR "cor cannot allocate memory for sending "
				"announces");
		return;
	}

	memset(ann, 0, sizeof(struct announce_data));

	dev_hold(dev);
	ann->dev = dev;

	mutex_lock(&(neighbor_operation_lock));
	list_add_tail(&(ann->lh), &announce_out_list);
	mutex_unlock(&(neighbor_operation_lock));

	ann->scheduled_announce_timer = get_jiffies_64();
	INIT_DELAYED_WORK(&(ann->announce_work), splitsend_announce);
	schedule_delayed_work(&(ann->announce_work), 1);
}
static void announce_sent_rmdev(struct net_device *dev)
{
	struct announce_data *ann;

	mutex_lock(&(neighbor_operation_lock));

	ann = get_announce_by_netdev(dev);

	if (ann != 0) {
		dev_put(ann->dev);
		ann->dev = 0;
	}

	mutex_unlock(&(neighbor_operation_lock));
}
int netdev_notify_func(struct notifier_block *not, unsigned long event,
		void *ptr)
{
	struct net_device *dev = (struct net_device *) ptr;

	switch (event) {
	case NETDEV_UP:
		announce_sent_adddev(dev);
		break;
	case NETDEV_DOWN:
		announce_sent_rmdev(dev);
		break;
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
		break;
	default:
		return 1;
	}

	return 0;
}
static int set_announce(char *msg, __u32 len)
{
	struct announce *ann = kmalloc(sizeof(struct announce), GFP_KERNEL);

	if (unlikely(ann == 0)) {
		kfree(msg);
		return 1;
	}

	memset(ann, 0, sizeof(struct announce));

	ann->announce_msg = msg;
	ann->announce_msg_len = len;

	kref_init(&(ann->ref));

	mutex_lock(&(neighbor_operation_lock));

	if (last_announce != 0) {
		ann->packet_version = last_announce->packet_version + 1;
		kref_put(&(last_announce->ref), announce_free);
	}

	last_announce = ann;

	mutex_unlock(&(neighbor_operation_lock));

	return 0;
}
static int generate_announce(void)
{
	__u32 addrtypelen = strlen(addrtype);

	__u32 hdr_len = 16;
	__u32 cmd_hdr_len = 8;
	__u32 cmd_len = 2 + 2 + addrtypelen + addrlen;

	__u32 len = hdr_len + cmd_hdr_len + cmd_len;
	__u32 offset = 0;

	char *msg = kmalloc(len, GFP_KERNEL);
	if (unlikely(msg == 0))
		return 1;

	put_u32(msg + offset, 0, 1); /* min_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_announce_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* min_cor_proto_version */
	offset += 4;
	put_u32(msg + offset, 0, 1); /* max_cor_proto_version */
	offset += 4;

	put_u32(msg + offset, NEIGHCMD_ADDADDR, 1); /* command */
	offset += 4;
	put_u32(msg + offset, cmd_len, 1); /* command length */
	offset += 4;

	/* addrtypelen, addrlen */
	put_u16(msg + offset, addrtypelen, 1);
	offset += 2;
	put_u16(msg + offset, addrlen, 1);
	offset += 2;

	/* addrtype, addr */
	memcpy(msg + offset, addrtype, addrtypelen);
	offset += addrtypelen;
	memcpy(msg + offset, addr, addrlen);
	offset += addrlen;

	BUG_ON(offset != len);

	return set_announce(msg, len);
}
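/*
 * Worked example (added): with addrtype "id" (addrtypelen 2) and a
 * 16 byte address, len = 16 + 8 + (2 + 2 + 2 + 16) = 46 bytes: four
 * protocol version fields, the NEIGHCMD_ADDADDR command header, and the
 * command payload.
 */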
int __init cor_neighbor_init(void)
{
	addrlen = 16;

	addr = kmalloc(addrlen, GFP_KERNEL);
	if (unlikely(addr == 0))
		return 1;

	get_random_bytes(addr, addrlen);

	nb_slab = kmem_cache_create("cor_neighbor", sizeof(struct neighbor), 8,
			0, 0);

	announce_in_slab = kmem_cache_create("cor_announce_in",
			sizeof(struct announce_in), 8, 0, 0);

	if (generate_announce())
		return 1;

	memset(&netdev_notify, 0, sizeof(netdev_notify));
	netdev_notify.notifier_call = netdev_notify_func;
	register_netdevice_notifier(&netdev_notify);

	return 0;
}

MODULE_LICENSE("GPL");