/**
 * Connection oriented routing
 * Copyright (C) 2007-2011 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <asm/byteorder.h>

#include "cor.h" /* project header; assumed to declare the types used below */
/* not sent over the network - internal meaning only */
#define MSGTYPE_PONG 1
#define MSGTYPE_ACK 2
#define MSGTYPE_ACK_CONN 3
#define MSGTYPE_CONNECT 4
#define MSGTYPE_CONNECT_SUCCESS 5
#define MSGTYPE_RESET_CONN 6
#define MSGTYPE_CONNDATA 7
#define MSGTYPE_CONNID_UNKNOWN 8
#define MSGTYPE_SET_MAX_CMSG_DELAY 9

#define MSGTYPE_PONG_TIMEENQUEUED 1
#define MSGTYPE_PONG_RESPDELAY 2
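/*
 * Pong delay compensation: a pong is stamped with its enqueue time
 * (MSGTYPE_PONG_TIMEENQUEUED) and is converted into an explicit
 * response delay in microseconds (MSGTYPE_PONG_RESPDELAY) only when it
 * is actually written into a packet; see add_pong() below.
 */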
struct control_msg_out {
	struct list_head lh; /* either neighbor or control_retrans_packet */
	struct neighbor *nb;

	struct kref ref;

	unsigned long timeout;

	__u32 length;

	__u8 type;
	union {
		struct {
			__u32 cookie;
			__u8 type;

			union {
				ktime_t time_enqueued;
				__u32 respdelay;
			} delaycomp;
		} pong;

		struct {
			__u32 seqno;
		} ack;

		struct {
			struct conn *rconn;
			__u32 conn_id;
			__u32 seqno;
			__u32 seqno_ooo;
			__u32 length;

			__u16 decaytime;
			__u8 decaytime_seqno;

			__u8 flags;
		} ack_conn;

		struct {
			__u32 conn_id;
			__u32 init_seqno;
			struct conn *sconn;
		} connect;

		struct {
			__u32 rcvd_conn_id;
			__u32 gen_conn_id;
			__u32 init_seqno;
			struct conn *rconn;
		} connect_success;

		struct {
			struct htab_entry htab_entry;
			__u32 conn_id_reset;
			__u32 conn_id_unknown;
		} reset_connidunknown;

		struct {
			__u32 delay;
		} set_max_cmsg_delay;

		struct {
			__u32 conn_id;
			__u32 seqno;
			char *data_orig;
			char *data;
			__u32 datalen;
		} conn_data;
	} msg;
};
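/*
 * Lifetime/accounting notes: a control_msg_out counts against the
 * per-neighbor (nb->cmcnt) and global (cmcnt) budgets unless it is
 * "urgent" (pong/ack, tracked via nb->ucmcnt instead).  The
 * reset_connidunknown member doubles as the htable entry for the
 * unknown_connids table below.
 */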
struct control_retrans {
	struct kref ref;

	struct neighbor *nb;
	__u32 seqno;

	unsigned long timeout;

	struct list_head msgs;

	struct htab_entry htab_entry;
	struct list_head timeout_list;
};
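/*
 * A control_retrans bundles all messages that were sent in one kernel
 * packet.  It is keyed by (nb, seqno) in the retransmits htable and
 * sits on nb->retrans_list until the packet is acked or times out.
 */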
struct unknownconnid_matchparam {
	struct neighbor *nb;
	__u32 conn_id;
};
struct retransmit_matchparam {
	struct neighbor *nb;
	__u32 seqno;
};
struct kmem_cache *controlmsg_slab;
struct kmem_cache *controlretrans_slab;

static struct htable retransmits;

DEFINE_SPINLOCK(unknown_connids_lock);
static struct htable unknown_connids;

atomic_t cmcnt = ATOMIC_INIT(0);
static void add_control_msg(struct control_msg_out *msg, int retrans);
static __u32 ucm_to_key(struct unknownconnid_matchparam *ucm)
{
	return ((__u32)((long) ucm->nb)) ^ ucm->conn_id;
}

static __u32 rm_to_key(struct retransmit_matchparam *rm)
{
	return ((__u32)((long) rm->nb)) ^ rm->seqno;
}
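/*
 * The hash keys simply XOR the (truncated) neighbor pointer with the
 * conn_id resp. seqno; the key only has to spread entries across
 * buckets, the match functions at the end of this file do the exact
 * comparison.
 */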
static inline int isurgent(struct control_msg_out *cm)
{
	if (unlikely(cm->type == MSGTYPE_PONG || cm->type == MSGTYPE_ACK))
		return 1;
	return 0;
}
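/* pongs and acks are "urgent": they bypass the normal cmcnt budget */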
static struct control_msg_out *__alloc_control_msg(void)
{
	struct control_msg_out *cm = kmem_cache_alloc(controlmsg_slab,
			GFP_KERNEL);
	if (unlikely(cm == 0))
		return 0;
	memset(cm, 0, sizeof(struct control_msg_out));
	cm->lh.next = LIST_POISON1;
	cm->lh.prev = LIST_POISON2;
	kref_init(&(cm->ref));
	return cm;
}
static int calc_limit(int limit, int priority)
{
	if (priority == ACM_PRIORITY_LOW)
		return (limit+1)/2;
	else if (priority == ACM_PRIORITY_MED)
		return (limit * 2 + 1)/3;
	else if (priority == ACM_PRIORITY_HIGH)
		return limit;

	BUG();
	return 0;
}
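/*
 * Worked example (with the ACM_PRIORITY_LOW branch as reconstructed
 * above): for limit == 10, LOW yields 5, MED 7 and HIGH the full 10,
 * so higher-priority allocations may dip further into the budget.
 */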
int may_alloc_control_msg(struct neighbor *nb, int priority)
{
	long packets1 = atomic_read(&(nb->cmcnt));
	long packets2 = atomic_read(&(cmcnt));

	BUG_ON(packets1 < 0);
	BUG_ON(packets2 < 0);

	if (packets1 < calc_limit(GUARANTEED_CMSGS_PER_NEIGH, priority))
		return 1;

	if (unlikely(unlikely(packets2 >= calc_limit(MAX_CMSGS_PER_NEIGH,
			priority)) || unlikely(packets1 >= (
			calc_limit(MAX_CMSGS_PER_NEIGH, priority) *
			(MAX_CMSGS - packets2) / MAX_CMSGS))))
		return 0;

	return 1;
}
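/*
 * Admission policy: every neighbor may always use its guaranteed share
 * (GUARANTEED_CMSGS_PER_NEIGH).  Beyond that, the per-neighbor
 * allowance shrinks linearly as the global count approaches MAX_CMSGS,
 * so a single busy neighbor cannot exhaust the global budget.
 */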
static struct control_msg_out *_alloc_control_msg(struct neighbor *nb,
		int priority, int urgent)
{
	struct control_msg_out *cm = 0;

	BUG_ON(nb == 0);

	if (urgent == 0) {
		long packets1 = atomic_inc_return(&(nb->cmcnt));
		long packets2 = atomic_inc_return(&(cmcnt));

		BUG_ON(packets1 <= 0);
		BUG_ON(packets2 <= 0);

		if (packets1 <= calc_limit(GUARANTEED_CMSGS_PER_NEIGH,
				priority))
			goto alloc;

		if (unlikely(unlikely(packets2 > calc_limit(MAX_CMSGS_PER_NEIGH,
				priority)) || unlikely(packets1 > (
				calc_limit(MAX_CMSGS_PER_NEIGH, priority) *
				(MAX_CMSGS - packets2) / MAX_CMSGS))))
			goto full;
	}

alloc:
	cm = __alloc_control_msg();
	if (unlikely(cm == 0))
		goto full;
	cm->nb = nb;

	return cm;

full:
	if (urgent == 0) {
		atomic_dec(&(nb->cmcnt));
		atomic_dec(&(cmcnt));
	}
	return 0;
}
struct control_msg_out *alloc_control_msg(struct neighbor *nb, int priority)
{
	return _alloc_control_msg(nb, priority, 0);
}
static void cmsg_kref_free(struct kref *ref)
{
	struct control_msg_out *cm = container_of(ref, struct control_msg_out,
			ref);
	kmem_cache_free(controlmsg_slab, cm);
}
void free_control_msg(struct control_msg_out *cm)
{
	if (isurgent(cm) == 0) {
		atomic_dec(&(cm->nb->cmcnt));
		atomic_dec(&(cmcnt));
	}

	if (cm->type == MSGTYPE_ACK_CONN) {
		struct conn *sconn;
		BUG_ON(cm->msg.ack_conn.rconn == 0);
		sconn = cm->msg.ack_conn.rconn->reversedir;
		BUG_ON(sconn->targettype != TARGET_OUT);
		if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_CREDITS) != 0 &&
				sconn->target.out.decaytime_send_allowed != 0) {
			sconn->target.out.decaytime_send_allowed = 0;
			refresh_conn_credits(sconn, 0, 0);
		}

		kref_put(&(cm->msg.ack_conn.rconn->ref), free_conn);
		cm->msg.ack_conn.rconn = 0;
	} else if (cm->type == MSGTYPE_CONNECT) {
		BUG_ON(cm->msg.connect.sconn == 0);
		kref_put(&(cm->msg.connect.sconn->ref), free_conn);
		cm->msg.connect.sconn = 0;
	} else if (cm->type == MSGTYPE_CONNECT_SUCCESS) {
		BUG_ON(cm->msg.connect_success.rconn == 0);
		kref_put(&(cm->msg.connect_success.rconn->ref), free_conn);
		cm->msg.connect_success.rconn = 0;
	} else if (cm->type == MSGTYPE_RESET_CONN ||
			cm->type == MSGTYPE_CONNID_UNKNOWN) {
		struct unknownconnid_matchparam ucm;

		ucm.nb = cm->nb;
		ucm.conn_id = cm->msg.reset_connidunknown.conn_id_unknown;

		htable_delete(&unknown_connids, ucm_to_key(&ucm), &ucm,
				cmsg_kref_free);
	}

	kref_put(&(cm->ref), cmsg_kref_free);
}
static void free_control_retrans(struct kref *ref)
{
	struct control_retrans *cr = container_of(ref, struct control_retrans,
			ref);

	while (list_empty(&(cr->msgs)) == 0) {
		struct control_msg_out *cm = container_of(cr->msgs.next,
				struct control_msg_out, lh);
		list_del(&(cm->lh));
		free_control_msg(cm);
	}

	kmem_cache_free(controlretrans_slab, cr);
}
static void set_retrans_timeout(struct control_retrans *cr, struct neighbor *nb)
{
	cr->timeout = jiffies + usecs_to_jiffies(100000 +
			((__u32) atomic_read(&(nb->latency))) * 2 +
			((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
}
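/*
 * Retransmit timeout = 100ms floor + 2 * measured neighbor latency +
 * the delay the remote side may legitimately add before acking
 * (max_remote_cmsg_delay); all three terms are in microseconds.
 */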
void retransmit_timerfunc(struct work_struct *work)
{
	unsigned long iflags;

	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer);

	int nbstate;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	while (1) {
		struct control_retrans *cr = 0;
		struct retransmit_matchparam rm;

		spin_lock_irqsave(&(nb->retrans_lock), iflags);

		if (list_empty(&(nb->retrans_list))) {
			nb->retrans_timer_running = 0;
			break;
		}

		cr = container_of(nb->retrans_list.next,
				struct control_retrans, timeout_list);

		BUG_ON(cr->nb != nb);

		rm.seqno = cr->seqno;
		rm.nb = nb;

		list_del(&(cr->timeout_list));

		if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

			htable_delete(&retransmits, rm_to_key(&rm), &rm,
					free_control_retrans);
			kref_put(&(cr->ref), free_control_retrans);
			continue;
		}

		if (time_after(cr->timeout, jiffies)) {
			list_add(&(cr->timeout_list), &(nb->retrans_list));
			schedule_delayed_work(&(nb->retrans_timer),
					cr->timeout - jiffies);
			break;
		}

		if (unlikely(htable_delete(&retransmits, rm_to_key(&rm), &rm,
				free_control_retrans)))
			BUG();

		spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

		while (list_empty(&(cr->msgs)) == 0) {
			struct control_msg_out *cm = container_of(cr->msgs.next,
					struct control_msg_out, lh);
			list_del(&(cm->lh));
			add_control_msg(cm, 1);
		}

		kref_put(&(cr->ref), free_control_retrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	kref_put(&(nb->ref), neighbor_free);
}
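/*
 * Note that timed-out packets are not resent verbatim: their messages
 * are unlinked and pushed back through add_control_msg(..., 1), so
 * they get repacked into fresh kernel packets by send_messages().
 */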
static void schedule_retransmit(struct control_retrans *cr, struct neighbor *nb)
{
	unsigned long iflags;

	struct retransmit_matchparam rm;
	int first;

	rm.seqno = cr->seqno;
	rm.nb = nb;

	set_retrans_timeout(cr, nb);

	spin_lock_irqsave(&(nb->retrans_lock), iflags);
	htable_insert(&retransmits, (char *) cr, rm_to_key(&rm));
	first = list_empty(&(nb->retrans_list));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list));

	if (first && nb->retrans_timer_running == 0) {
		schedule_delayed_work(&(nb->retrans_timer),
				cr->timeout - jiffies);
		nb->retrans_timer_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}
void kern_ack_rcvd(struct neighbor *nb, __u32 seqno)
{
	unsigned long iflags;

	struct control_retrans *cr = 0;
	struct retransmit_matchparam rm;

	rm.seqno = seqno;
	rm.nb = nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	cr = (struct control_retrans *) htable_get(&retransmits, rm_to_key(&rm),
			&rm);

	if (cr == 0) {
		printk(KERN_ERR "bogus/duplicate ack received");
		goto out;
	}

	if (unlikely(htable_delete(&retransmits, rm_to_key(&rm), &rm,
			free_control_retrans)))
		BUG();

	BUG_ON(cr->nb != nb);

	list_del(&(cr->timeout_list));

out:
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	if (cr != 0) {
		kref_put(&(cr->ref), free_control_retrans); /* htable_get */
		kref_put(&(cr->ref), free_control_retrans); /* list */
	}
}
static void padding(struct sk_buff *skb, int length)
{
	char *dst;

	if (length <= 0)
		return;

	dst = skb_put(skb, length);
	BUG_ON(dst == 0);
	memset(dst, KP_PADDING, length);
}
static int add_ack(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_ACK;
	put_u32(dst + 1, cm->msg.ack.seqno, 1);

	atomic_dec(&(cm->nb->ucmcnt));
	free_control_msg(cm);

	return 5;
}
static int add_ack_conn(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;
	int offset = 0;

	if (unlikely(spaceleft < cm->length))
		return 0;

	dst = skb_put(skb, cm->length);
	BUG_ON(dst == 0);

	dst[offset] = KP_ACK_CONN;
	offset++;
	put_u32(dst + offset, cm->msg.ack_conn.conn_id, 1);
	offset += 4;
	dst[offset] = cm->msg.ack_conn.flags;
	offset++;

	if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
		put_u32(dst + offset, cm->msg.ack_conn.seqno, 1);
		offset += 4;
	}

	if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_WINDOW) != 0) {
		BUG_ON(cm->msg.ack_conn.rconn == 0);
		dst[offset] = get_window(cm->msg.ack_conn.rconn, cm->nb);
		offset++;
	}

	if (ooolen(cm->msg.ack_conn.flags) != 0) {
		put_u32(dst + offset, cm->msg.ack_conn.seqno_ooo, 1);
		offset += 4;

		if (ooolen(cm->msg.ack_conn.flags) == 1) {
			BUG_ON(cm->msg.ack_conn.length > 255);
			dst[offset] = cm->msg.ack_conn.length;
			offset++;
		} else if (ooolen(cm->msg.ack_conn.flags) == 2) {
			BUG_ON(cm->msg.ack_conn.length <= 255);
			BUG_ON(cm->msg.ack_conn.length > 65535);
			put_u16(dst + offset, cm->msg.ack_conn.length, 1);
			offset += 2;
		} else if (ooolen(cm->msg.ack_conn.flags) == 4) {
			BUG_ON(cm->msg.ack_conn.length <= 65535);
			put_u32(dst + offset, cm->msg.ack_conn.length, 1);
			offset += 4;
		} else {
			BUG();
		}
	}

	if ((cm->msg.ack_conn.flags & KP_ACK_CONN_FLAGS_CREDITS) != 0) {
		__u16 value = cm->msg.ack_conn.decaytime + (
				cm->msg.ack_conn.decaytime_seqno << 10);

		BUG_ON(cm->msg.ack_conn.decaytime >= 1024);
		BUG_ON(cm->msg.ack_conn.decaytime_seqno >= 64);

		put_u16(dst + offset, value, 1);
		offset += 2;
	}

	list_add_tail(&(cm->lh), &(cr->msgs));

	BUG_ON(offset != cm->length);
	return offset;
}
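/*
 * KP_ACK_CONN wire layout: 1 byte type, 4 bytes conn_id, 1 byte flags,
 * then optional fields selected by the flags: seqno (4 bytes), window
 * (1 byte), out-of-order seqno plus a 1/2/4 byte length, and the
 * credits field (2 bytes) packing decaytime (10 bits) and
 * decaytime_seqno (6 bits).
 */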
static int add_ping(struct sk_buff *skb, __u32 cookie,
		int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_PING;
	put_u32(dst + 1, cookie, 0);

	return 5;
}
static int add_pong(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 9))
		return 0;

	if (cm->msg.pong.type == MSGTYPE_PONG_TIMEENQUEUED) {
		__s64 now = ktime_to_ns(ktime_get());
		__s64 enq = ktime_to_ns(cm->msg.pong.delaycomp.time_enqueued);
		__s64 respdelay = (now - enq + 500) / 1000;
		if (unlikely(respdelay >= (1LL << 32)))
			respdelay = (1LL << 32) - 1;
		cm->msg.pong.type = MSGTYPE_PONG_RESPDELAY;
		cm->msg.pong.delaycomp.respdelay = (__u32) respdelay;
	}

	BUG_ON(cm->msg.pong.type != MSGTYPE_PONG_RESPDELAY);

	dst = skb_put(skb, 9);
	BUG_ON(dst == 0);

	dst[0] = KP_PONG;
	put_u32(dst + 1, cm->msg.pong.cookie, 0);
	put_u32(dst + 5, cm->msg.pong.delaycomp.respdelay, 1);

	atomic_dec(&(cm->nb->ucmcnt));
	list_add_tail(&(cm->lh), &(cr->msgs));

	return 9;
}
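/*
 * The response delay is computed when the pong leaves, not when it was
 * queued: nanoseconds are rounded to the nearest microsecond
 * ((now - enq + 500) / 1000) and clamped to 32 bits for the wire.
 */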
#warning todo targettype might have changed
static __u16 get_credits(struct conn *sconn)
{
	__u16 ret;

	mutex_lock(&(sconn->reversedir->rcv_lock));
	BUG_ON(sconn->reversedir->targettype != TARGET_OUT);

	BUG_ON(sconn->reversedir->target.out.decaytime_last >= 1024);
	BUG_ON(sconn->reversedir->target.out.decaytime_seqno >= 64);
	ret = sconn->reversedir->target.out.decaytime_last + (
			sconn->reversedir->target.out.decaytime_seqno <<
			10);
	mutex_unlock(&(sconn->reversedir->rcv_lock));

	return ret;
}
static int add_connect(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 12))
		return 0;

	dst = skb_put(skb, 12);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNECT;
	put_u32(dst + 1, cm->msg.connect.conn_id, 1);
	put_u32(dst + 5, cm->msg.connect.init_seqno, 1);
	BUG_ON(cm->msg.connect.sconn == 0);
	dst[9] = get_window(cm->msg.connect.sconn, cm->nb);
	put_u16(dst + 10, get_credits(cm->msg.connect.sconn), 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 12;
}
static int add_connect_success(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 16))
		return 0;

	dst = skb_put(skb, 16);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNECT_SUCCESS;
	put_u32(dst + 1, cm->msg.connect_success.rcvd_conn_id, 1);
	put_u32(dst + 5, cm->msg.connect_success.gen_conn_id, 1);
	put_u32(dst + 9, cm->msg.connect_success.init_seqno, 1);
	BUG_ON(cm->msg.connect_success.rconn == 0);
	dst[13] = get_window(cm->msg.connect_success.rconn, cm->nb);
	put_u16(dst + 14, get_credits(cm->msg.connect_success.rconn), 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 16;
}
static int add_reset_conn(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_RESET_CONN;
	put_u32(dst + 1, cm->msg.reset_connidunknown.conn_id_reset, 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 5;
}
static int add_conndata(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	char *dst;

	int totallen = cm->msg.conn_data.datalen + 11;
	int putlen = min(totallen, spaceleft);
	int dataputlen = putlen - 11;

	BUG_ON(split_conndata == 0);
	BUG_ON(sc_sendlen == 0);

	if (dataputlen < 1 || (spaceleft < 25 && spaceleft < totallen))
		return 0;

	dst = skb_put(skb, putlen);
	BUG_ON(dst == 0);

	dst[0] = KP_CONN_DATA;
	put_u32(dst + 1, cm->msg.conn_data.conn_id, 1);
	put_u32(dst + 5, cm->msg.conn_data.seqno, 1);
	put_u16(dst + 9, dataputlen, 1);

	memcpy(dst + 11, cm->msg.conn_data.data, dataputlen);

	if (cm->msg.conn_data.datalen == dataputlen) {
		list_add_tail(&(cm->lh), &(cr->msgs));
	} else {
		*split_conndata = cm;
		*sc_sendlen = dataputlen;
	}

	return putlen;
}
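/*
 * If the payload does not fit, as much as possible is written now and
 * the message is handed back through *split_conndata together with the
 * number of bytes already on the wire (*sc_sendlen); _send_messages()
 * requeues the remainder after transmission.
 */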
static int add_connid_unknown(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_CONNID_UNKNOWN;
	put_u32(dst + 1, cm->msg.reset_connidunknown.conn_id_unknown, 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 5;
}
static int add_set_max_cmsg_dly(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft)
{
	char *dst;

	if (unlikely(spaceleft < 5))
		return 0;

	dst = skb_put(skb, 5);
	BUG_ON(dst == 0);

	dst[0] = KP_SET_MAX_CMSG_DELAY;
	put_u32(dst + 1, cm->msg.set_max_cmsg_delay.delay, 1);

	list_add_tail(&(cm->lh), &(cr->msgs));

	return 5;
}
static int add_message(struct sk_buff *skb, struct control_retrans *cr,
		struct control_msg_out *cm, int spaceleft,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	BUG_ON(split_conndata != 0 && *split_conndata != 0);
	BUG_ON(sc_sendlen != 0 && *sc_sendlen != 0);

	switch (cm->type) {
	case MSGTYPE_ACK:
		return add_ack(skb, cr, cm, spaceleft);
	case MSGTYPE_ACK_CONN:
		return add_ack_conn(skb, cr, cm, spaceleft);
	case MSGTYPE_PONG:
		return add_pong(skb, cr, cm, spaceleft);
	case MSGTYPE_CONNECT:
		return add_connect(skb, cr, cm, spaceleft);
	case MSGTYPE_CONNECT_SUCCESS:
		return add_connect_success(skb, cr, cm, spaceleft);
	case MSGTYPE_RESET_CONN:
		return add_reset_conn(skb, cr, cm, spaceleft);
	case MSGTYPE_CONNDATA:
		return add_conndata(skb, cr, cm, spaceleft, split_conndata,
				sc_sendlen);
	case MSGTYPE_CONNID_UNKNOWN:
		return add_connid_unknown(skb, cr, cm, spaceleft);
	case MSGTYPE_SET_MAX_CMSG_DELAY:
		return add_set_max_cmsg_dly(skb, cr, cm, spaceleft);
	default:
		BUG();
	}

	return 0;
}
static __u32 __send_messages(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft, int urgentonly,
		struct control_msg_out **split_conndata, __u32 *sc_sendlen)
{
	__u32 length = 0;

	mutex_lock(&(nb->cmsg_lock));
	while (!list_empty(&(nb->ucontrol_msgs_out)) || (!urgentonly &&
			!list_empty(&(nb->control_msgs_out)))) {
		int rc;

		int urgent = !list_empty(&(nb->ucontrol_msgs_out));

		struct control_msg_out *cm;

		if (urgent)
			cm = container_of(nb->ucontrol_msgs_out.next,
					struct control_msg_out, lh);
		else
			cm = container_of(nb->control_msgs_out.next,
					struct control_msg_out, lh);

		list_del(&(cm->lh));
		if (urgent)
			nb->ucmlength -= cm->length;
		else
			nb->cmlength -= cm->length;
		mutex_unlock(&(nb->cmsg_lock));
		rc = add_message(skb, cr, cm, spaceleft - length,
				split_conndata, sc_sendlen);
		mutex_lock(&(nb->cmsg_lock));

		if (rc == 0) {
			/* did not fit; requeue at the head and stop */
			if (urgent) {
				list_add(&(cm->lh), &(nb->ucontrol_msgs_out));
				nb->ucmlength += cm->length;
			} else {
				list_add(&(cm->lh), &(nb->control_msgs_out));
				nb->cmlength += cm->length;
			}
			break;
		}

		length += rc;
	}

	mutex_unlock(&(nb->cmsg_lock));

	return length;
}
static int __send_messages_smcd(struct neighbor *nb, struct sk_buff *skb,
		struct control_retrans *cr, int spaceleft)
{
	struct control_msg_out *cm;
	int rc;

	cm = alloc_control_msg(nb, ACM_PRIORITY_LOW);

	if (unlikely(cm == 0))
		return 0;

	cm->type = MSGTYPE_SET_MAX_CMSG_DELAY;
	cm->msg.set_max_cmsg_delay.delay = CMSG_INTERVAL_MS * 10;
	cm->length = 5;

	rc = add_message(skb, cr, cm, spaceleft, 0, 0);
	if (unlikely(rc == 0))
		free_control_msg(cm); /* no space left; retry next packet */
	else
		nb->max_cmsg_delay_sent = 1;

	return rc;
}
static int _send_messages(struct neighbor *nb, struct sk_buff *skb, int ping,
		struct control_retrans *cr, int spaceleft, int urgentonly)
{
	int rc;
	int length = 0;
	__u32 pingcookie = 0;
	unsigned long last_ping_time;
	struct control_msg_out *split_conndata = 0;
	__u32 sc_sendlen = 0;

	mutex_lock(&(nb->cmsg_lock));

	if (ping != 0) {
		pingcookie = add_ping_req(nb, &last_ping_time);
		rc = add_ping(skb, pingcookie, spaceleft - length);
		BUG_ON(rc == 0);
		length += rc;
	}

	if (likely(urgentonly == 0) && unlikely(nb->max_cmsg_delay_sent == 0))
		length += __send_messages_smcd(nb, skb, cr, spaceleft - length);

	mutex_unlock(&(nb->cmsg_lock));

	length += __send_messages(nb, skb, cr, spaceleft - length, urgentonly,
			&split_conndata, &sc_sendlen);

	if (unlikely(length > spaceleft))
		printk(KERN_ERR "error cor/kpacket_gen: length > spaceleft!?");

	padding(skb, spaceleft - length);

	rc = dev_queue_xmit(skb);

	if (rc != 0) {
		unadd_ping_req(nb, pingcookie, last_ping_time);

		while (list_empty(&(cr->msgs)) == 0) {
			struct control_msg_out *cm = container_of(cr->msgs.prev,
					struct control_msg_out, lh);
			list_del(&(cm->lh));
			add_control_msg(cm, 1);
		}

		if (split_conndata != 0)
			add_control_msg(split_conndata, 1);

		kref_put(&(cr->ref), free_control_retrans);
	} else {
		struct list_head *curr = cr->msgs.next;

		while (curr != &(cr->msgs)) {
			struct control_msg_out *cm = container_of(curr,
					struct control_msg_out, lh);

			curr = curr->next;

			if (cm->type == MSGTYPE_CONNDATA) {
				list_del(&(cm->lh));
				kfree(cm->msg.conn_data.data_orig);
				free_control_msg(cm);
			}
		}

		if (split_conndata != 0) {
			BUG_ON(sc_sendlen == 0);
			BUG_ON(sc_sendlen >=
					split_conndata->msg.conn_data.datalen);

			split_conndata->msg.conn_data.data += sc_sendlen;
			split_conndata->msg.conn_data.datalen -= sc_sendlen;

			send_conndata(split_conndata,
					split_conndata->msg.conn_data.conn_id,
					split_conndata->msg.conn_data.seqno,
					split_conndata->msg.conn_data.data_orig,
					split_conndata->msg.conn_data.data,
					split_conndata->msg.conn_data.datalen);
		}

		if (list_empty(&(cr->msgs)))
			kref_put(&(cr->ref), free_control_retrans);
		else
			schedule_retransmit(cr, nb);
	}

	return rc;
}
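/*
 * On dev_queue_xmit() failure everything packed into this skb is
 * pushed back onto the queues (walking cr->msgs backwards via .prev
 * preserves the original order) and the ping request is withdrawn.  On
 * success only conndata is freed here; all other messages stay on cr
 * for a possible retransmit.
 */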
static __u32 get_total_messages_length(struct neighbor *nb, int ping,
		int urgentonly)
{
	__u32 length = nb->ucmlength;

	if (likely(urgentonly == 0)) {
		length += nb->cmlength;

		if (unlikely(nb->max_cmsg_delay_sent == 0))
			length += 5; /* size of a set_max_cmsg_delay message */
	}

	if (ping == 2 || (length > 0 && ping != 0))
		length += 5; /* size of a ping message */

	return length;
}
static int reset_timeouted_conn_needed(struct neighbor *nb, struct conn *src_in)
{
	if (unlikely(unlikely(src_in->sourcetype != SOURCE_IN) ||
			unlikely(src_in->source.in.nb != nb) ||
			unlikely(atomic_read(&(src_in->isreset)) != 0)))
		return 0;
	else if (likely(time_after(src_in->source.in.jiffies_last_act +
			CONN_ACTIVITY_UPDATEINTERVAL_SEC * HZ +
			CONN_INACTIVITY_TIMEOUT_SEC * HZ, jiffies)))
		return 0;

	return 1;
}
static void reset_timeouted_conns(struct neighbor *nb)
{
	int i;

	for (i=0;i<10000;i++) {
		unsigned long iflags;
		struct conn *src_in;

		int resetrc;
		int rc = 0;

		spin_lock_irqsave(&(nb->conn_list_lock), iflags);

		if (list_empty(&(nb->rcv_conn_list))) {
			spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);
			break;
		}

		src_in = container_of(nb->rcv_conn_list.next, struct conn,
				source.in.nb_list);
		kref_get(&(src_in->ref));

		spin_unlock_irqrestore(&(nb->conn_list_lock), iflags);

		mutex_lock(&(src_in->rcv_lock));

		resetrc = reset_timeouted_conn_needed(nb, src_in);
		if (likely(resetrc == 0))
			goto unlock;

		rc = send_reset_conn(nb, src_in->reversedir->target.out.conn_id,
				src_in->source.in.conn_id, 1);
		if (unlikely(rc != 0))
			goto unlock;

		atomic_cmpxchg(&(src_in->reversedir->isreset), 0, 1);
		mutex_unlock(&(src_in->rcv_lock));
		reset_conn(src_in);
		goto put;

unlock:
		mutex_unlock(&(src_in->rcv_lock));
put:
		kref_put(&(src_in->ref), free_conn);

		if (likely(resetrc == 0) || rc != 0)
			break;
	}
}
int send_messages(struct neighbor *nb, int resume)
{
	int rc = 0;
	int ping;
	int targetmss = mss(nb);

	int nbstate = get_neigh_state(nb);
	int urgentonly = (nbstate != NEIGHBOR_STATE_ACTIVE);

	int i = 0;

	if (likely(urgentonly == 0))
		reset_timeouted_conns(nb);

	mutex_lock(&(nb->send_cmsg_lock));
	mutex_lock(&(nb->cmsg_lock));

	ping = time_to_send_ping(nb);

	while (1) {
		__u32 length;

		__u32 seqno;
		struct sk_buff *skb;
		struct control_retrans *cr;

		BUG_ON(list_empty(&(nb->control_msgs_out)) &&
				(nb->cmlength != 0));
		BUG_ON((list_empty(&(nb->control_msgs_out)) == 0) &&
				(nb->cmlength == 0));
		BUG_ON(list_empty(&(nb->ucontrol_msgs_out)) &&
				(nb->ucmlength != 0));
		BUG_ON((list_empty(&(nb->ucontrol_msgs_out)) == 0) &&
				(nb->ucmlength == 0));
		BUG_ON(nb->cmlength < 0);
		BUG_ON(nb->ucmlength < 0);

		length = get_total_messages_length(nb, ping, urgentonly);

		if (length == 0)
			break;

		if (length < targetmss && i > 0)
			break;

		seqno = atomic_add_return(1, &(nb->kpacket_seqno));

		if (length > targetmss)
			length = targetmss;

		mutex_unlock(&(nb->cmsg_lock));
		skb = create_packet(nb, length, GFP_KERNEL, 0, seqno);
		if (unlikely(skb == 0)) {
			printk(KERN_ERR "cor: send_messages: cannot allocate "
					"skb (out of memory?)");
			goto oom;
		}

		cr = kmem_cache_alloc(controlretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree_skb(skb);
			printk(KERN_ERR "cor: send_messages: cannot allocate "
					"control_retrans (out of memory?)");
			goto oom;
		}
		memset(cr, 0, sizeof(struct control_retrans));
		kref_init(&(cr->ref));
		cr->nb = nb;
		cr->seqno = seqno;
		INIT_LIST_HEAD(&(cr->msgs));

		rc = _send_messages(nb, skb, ping, cr, length, urgentonly);
		ping = 0;

		mutex_lock(&(nb->cmsg_lock));

		if (rc != 0)
			break;

		i++;
	}

	if (0) {
oom:
		mutex_lock(&(nb->cmsg_lock));
	}

	if (rc != 0) {
		qos_enqueue(nb->dev, &(nb->rb_kp), QOS_CALLER_KPACKET);
	} else {
		atomic_set(&(nb->cmsg_work_scheduled), 0);
		schedule_controlmsg_timer(nb);
	}

	mutex_unlock(&(nb->cmsg_lock));
	mutex_unlock(&(nb->send_cmsg_lock));

	if (resume == 0)
		kref_put(&(nb->ref), neighbor_free);

	return rc;
}
void controlmsg_workfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(work, struct neighbor, cmsg_work);
	send_messages(nb, 0);
}
static void schedule_cmsg_work(struct neighbor *nb)
{
	if (atomic_cmpxchg(&(nb->cmsg_work_scheduled), 0, 1) == 0) {
		kref_get(&(nb->ref));
		atomic_cmpxchg(&(nb->cmsg_timer_running), 1, 2);
		schedule_work(&(nb->cmsg_work));
	}
}
void controlmsg_timerfunc(unsigned long arg)
{
	struct neighbor *nb = (struct neighbor *) arg;

	int oldval = atomic_xchg(&(nb->cmsg_timer_running), 0);

	BUG_ON(oldval == 0);

	if (likely(oldval == 1))
		schedule_cmsg_work(nb);
	kref_put(&(nb->ref), neighbor_free);
}
static unsigned long get_cmsg_timeout(struct neighbor *nb, int nbstate)
{
	unsigned long timeout = get_next_ping_time(nb);

	if (likely(nbstate == NEIGHBOR_STATE_ACTIVE) &&
			list_empty(&(nb->control_msgs_out)) == 0) {
		struct control_msg_out *first = container_of(
				nb->control_msgs_out.next,
				struct control_msg_out, lh);
		if (time_before(first->timeout, jiffies +
				usecs_to_jiffies(nb->cmsg_interval)))
			timeout = jiffies;
		else if (time_before(first->timeout, timeout))
			timeout = first->timeout;
	}

	if (list_empty(&(nb->ucontrol_msgs_out)) == 0) {
		struct control_msg_out *first = container_of(
				nb->ucontrol_msgs_out.next,
				struct control_msg_out, lh);
		if (time_before(first->timeout, jiffies +
				usecs_to_jiffies(nb->cmsg_interval)))
			timeout = jiffies;
		else if (time_before(first->timeout, timeout))
			timeout = first->timeout;
	}

	return timeout;
}
static int cmsg_full_packet(struct neighbor *nb, int nbstate)
{
	int ping = time_to_send_ping(nb);
	int urgentonly = (nbstate != NEIGHBOR_STATE_ACTIVE);
	__u32 len = get_total_messages_length(nb, ping, urgentonly);

	if (len == 0)
		return 0;
	if (len < mss(nb))
		return 0;

	return 1;
}
void schedule_controlmsg_timer(struct neighbor *nb)
{
	unsigned long timeout;
	int state = get_neigh_state(nb);

	if (unlikely(state == NEIGHBOR_STATE_KILLED)) {
		atomic_cmpxchg(&(nb->cmsg_timer_running), 1, 2);
		return;
	}

	if (unlikely(atomic_read(&(nb->cmsg_work_scheduled)) == 1))
		return;

	if (cmsg_full_packet(nb, state))
		timeout = jiffies;
	else
		timeout = get_cmsg_timeout(nb, state);

	if (time_before_eq(timeout, jiffies)) {
		schedule_cmsg_work(nb);
	} else {
		if (atomic_xchg(&(nb->cmsg_timer_running), 1) == 0)
			kref_get(&(nb->ref));
		mod_timer(&(nb->cmsg_timer), timeout);
	}
}
static void free_oldest_ucm(struct neighbor *nb)
{
	struct control_msg_out *cm = container_of(nb->ucontrol_msgs_out.next,
			struct control_msg_out, lh);

	BUG_ON(list_empty(&(nb->ucontrol_msgs_out)));
	BUG_ON(isurgent(cm) == 0);

	list_del(&(cm->lh));
	nb->ucmlength -= cm->length;
	atomic_dec(&(nb->ucmcnt));
	free_control_msg(cm);
}
static void add_control_msg(struct control_msg_out *cm, int retrans)
{
	struct neighbor *nb;
	int nbstate;
	__u64 newinterval;
	unsigned long jiffies_tmp;

	BUG_ON(cm->nb == 0);
	nb = cm->nb;

	nbstate = get_neigh_state(nb);

	BUG_ON(cm->lh.next != LIST_POISON1 || cm->lh.prev != LIST_POISON2);

	cm->timeout = jiffies + msecs_to_jiffies(CMSG_INTERVAL_MS);

	mutex_lock(&(nb->cmsg_lock));

	if (isurgent(cm)) {
		long msgs = atomic_inc_return(&(nb->ucmcnt));
		BUG_ON(msgs <= 0);

		if (unlikely(retrans)) {
			if (msgs > MAX_URGENT_CMSGS_PER_NEIGH_RETRANSALLOW ||
					msgs > MAX_URGENT_CMSGS_PER_NEIGH) {
				atomic_dec(&(nb->ucmcnt));
				free_control_msg(cm);
				goto out;
			}

			nb->ucmlength += cm->length;
			list_add(&(cm->lh), &(nb->ucontrol_msgs_out));
		} else {
			if (msgs > MAX_URGENT_CMSGS_PER_NEIGH)
				free_oldest_ucm(nb);

			nb->ucmlength += cm->length;
			list_add_tail(&(cm->lh), &(nb->ucontrol_msgs_out));
		}
	} else {
		nb->cmlength += cm->length;
		list_add_tail(&(cm->lh), &(nb->control_msgs_out));
	}

	jiffies_tmp = jiffies;
	newinterval = (((__u64) nb->cmsg_interval) * 255 +
			jiffies_to_usecs(jiffies_tmp -
			nb->jiffies_last_cmsg)) / 256;
	nb->jiffies_last_cmsg = jiffies_tmp;
	if (unlikely(newinterval > (1LL << 32) - 1))
		nb->cmsg_interval = (__u32) ((1LL << 32) - 1);
	else
		nb->cmsg_interval = newinterval;

	schedule_controlmsg_timer(nb);

out:
	mutex_unlock(&(nb->cmsg_lock));
}
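/*
 * nb->cmsg_interval is an exponentially weighted moving average of the
 * spacing between queued control messages, in microseconds:
 * newinterval = (oldinterval * 255 + sample) / 256, clamped to 32
 * bits.  get_cmsg_timeout() uses it to decide whether a pending
 * message should go out now or may wait for further aggregation.
 */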
void send_pong(struct neighbor *nb, __u32 cookie)
{
	struct control_msg_out *cm = _alloc_control_msg(nb, 0, 1);

	if (unlikely(cm == 0))
		return;

	cm->type = MSGTYPE_PONG;
	cm->msg.pong.cookie = cookie;
	cm->msg.pong.type = MSGTYPE_PONG_TIMEENQUEUED;
	cm->msg.pong.delaycomp.time_enqueued = ktime_get();
	cm->length = 9;

	add_control_msg(cm, 0);
}
int send_reset_conn(struct neighbor *nb, __u32 conn_id_reset,
		__u32 conn_id_unknown, int lowprio)
{
	unsigned long iflags;
	struct unknownconnid_matchparam ucm;
	struct control_msg_out *cm = alloc_control_msg(nb, lowprio ?
			ACM_PRIORITY_LOW : ACM_PRIORITY_HIGH);

	if (unlikely(cm == 0))
		return 1;

	cm->type = MSGTYPE_RESET_CONN;
	cm->msg.reset_connidunknown.conn_id_reset = conn_id_reset;
	cm->msg.reset_connidunknown.conn_id_unknown = conn_id_unknown;
	cm->length = 5;

	ucm.nb = nb;
	ucm.conn_id = conn_id_unknown;

	spin_lock_irqsave(&unknown_connids_lock, iflags);
	BUG_ON(htable_get(&unknown_connids, ucm_to_key(&ucm), &ucm) != 0);
	htable_insert(&unknown_connids, (char *) cm, ucm_to_key(&ucm));
	spin_unlock_irqrestore(&unknown_connids_lock, iflags);

	add_control_msg(cm, 0);

	return 0;
}
void send_ack(struct neighbor *nb, __u32 seqno)
{
	struct control_msg_out *cm = _alloc_control_msg(nb, 0, 1);

	if (unlikely(cm == 0))
		return;

	cm->type = MSGTYPE_ACK;
	cm->msg.ack.seqno = seqno;
	cm->length = 5;

	add_control_msg(cm, 0);
}
#warning todo conn naming/locking
void send_ack_conn(struct control_msg_out *cm, struct conn *rconn,
		__u32 conn_id, __u32 seqno)
{
	cm->type = MSGTYPE_ACK_CONN;
	kref_get(&(rconn->ref));
	BUG_ON(rconn->sourcetype != SOURCE_IN);
	cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_SEQNO |
			KP_ACK_CONN_FLAGS_WINDOW;
	cm->msg.ack_conn.rconn = rconn;
	cm->msg.ack_conn.conn_id = conn_id;
	cm->msg.ack_conn.seqno = seqno;
	cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
	add_control_msg(cm, 0);
}
void send_ack_conn_ooo(struct control_msg_out *cm, struct conn *rconn,
		__u32 conn_id, __u32 seqno_ooo, __u32 length)
{
	cm->type = MSGTYPE_ACK_CONN;
	kref_get(&(rconn->ref));
	BUG_ON(rconn->sourcetype != SOURCE_IN);
	cm->msg.ack_conn.flags = ooolen_to_flags(length);
	cm->msg.ack_conn.rconn = rconn;
	cm->msg.ack_conn.conn_id = conn_id;
	cm->msg.ack_conn.seqno_ooo = seqno_ooo;
	cm->msg.ack_conn.length = length;
	cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
	add_control_msg(cm, 0);
}
void send_decaytime(struct conn *rconn, int force, __u16 decaytime)
{
	struct control_msg_out *cm;

#warning todo unforced send
	if (force == 0)
		return;

	cm = alloc_control_msg(rconn->target.out.nb, ACM_PRIORITY_LOW);

	if (unlikely(cm == 0))
		return;

	cm->type = MSGTYPE_ACK_CONN;
	kref_get(&(rconn->ref));
	BUG_ON(rconn->targettype != TARGET_OUT);
	cm->msg.ack_conn.flags = KP_ACK_CONN_FLAGS_CREDITS;
	cm->msg.ack_conn.rconn = rconn->reversedir;
	cm->msg.ack_conn.conn_id = rconn->target.out.conn_id;
	cm->msg.ack_conn.decaytime_seqno = rconn->target.out.decaytime_seqno;
	cm->msg.ack_conn.decaytime = decaytime;

	cm->length = 6 + ack_conn_len(cm->msg.ack_conn.flags);
	add_control_msg(cm, 0);

	rconn->target.out.decaytime_last = decaytime;
	rconn->target.out.decaytime_seqno = (rconn->target.out.decaytime_seqno +
			1) % 64;
	rconn->target.out.decaytime_send_allowed = 0;
}
void send_connect_success(struct control_msg_out *cm, __u32 rcvd_conn_id,
		__u32 gen_conn_id, __u32 init_seqno, struct conn *rconn)
{
	cm->type = MSGTYPE_CONNECT_SUCCESS;
	cm->msg.connect_success.rcvd_conn_id = rcvd_conn_id;
	cm->msg.connect_success.gen_conn_id = gen_conn_id;
	cm->msg.connect_success.init_seqno = init_seqno;
	kref_get(&(rconn->ref));
	cm->msg.connect_success.rconn = rconn;
	cm->length = 16;
	add_control_msg(cm, 0);
}
void send_connect_nb(struct control_msg_out *cm, __u32 conn_id,
		__u32 init_seqno, struct conn *sconn)
{
	cm->type = MSGTYPE_CONNECT;
	cm->msg.connect.conn_id = conn_id;
	cm->msg.connect.init_seqno = init_seqno;
	kref_get(&(sconn->ref));
	BUG_ON(sconn->sourcetype != SOURCE_IN);
	cm->msg.connect.sconn = sconn;
	cm->length = 12;
	add_control_msg(cm, 0);
}
void send_conndata(struct control_msg_out *cm, __u32 conn_id, __u32 seqno,
		char *data_orig, char *data, __u32 datalen)
{
	cm->type = MSGTYPE_CONNDATA;
	cm->msg.conn_data.conn_id = conn_id;
	cm->msg.conn_data.seqno = seqno;
	cm->msg.conn_data.data_orig = data_orig;
	cm->msg.conn_data.data = data;
	cm->msg.conn_data.datalen = datalen;
	cm->length = 11 + datalen;
	add_control_msg(cm, 0);
}
void send_connid_unknown(struct neighbor *nb, __u32 conn_id)
{
	unsigned long iflags;
	char *ret;

	struct unknownconnid_matchparam ucm;

	struct control_msg_out *cm = alloc_control_msg(nb, ACM_PRIORITY_HIGH);

	if (unlikely(cm == 0))
		return;

	cm->type = MSGTYPE_CONNID_UNKNOWN;
	cm->msg.reset_connidunknown.conn_id_unknown = conn_id;
	cm->length = 5;

	ucm.nb = nb;
	ucm.conn_id = conn_id;

	spin_lock_irqsave(&unknown_connids_lock, iflags);
	ret = htable_get(&unknown_connids, ucm_to_key(&ucm), &ucm);
	if (ret == 0)
		htable_insert(&unknown_connids, (char *) cm, ucm_to_key(&ucm));
	spin_unlock_irqrestore(&unknown_connids_lock, iflags);

	if (ret != 0) {
		/* a reset/connid_unknown for this conn_id is already queued */
		struct control_msg_out *cm2 = (struct control_msg_out *) ret;

		BUG_ON(cm2->type != MSGTYPE_RESET_CONN &&
				cm2->type != MSGTYPE_CONNID_UNKNOWN);

		kref_put(&(cm2->ref), cmsg_kref_free);

		free_control_msg(cm);
	} else {
		add_control_msg(cm, 0);
	}
}
static int matches_connretrans(void *htentry, void *searcheditem)
{
	struct control_retrans *cr = (struct control_retrans *) htentry;
	struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
			searcheditem;

	return rm->nb == cr->nb && rm->seqno == cr->seqno;
}
static int matches_unknownconnid(void *htentry, void *searcheditem)
{
	struct control_msg_out *cm = (struct control_msg_out *) htentry;

	struct unknownconnid_matchparam *ucm =
			(struct unknownconnid_matchparam *)searcheditem;

	BUG_ON(cm->type != MSGTYPE_RESET_CONN &&
			cm->type != MSGTYPE_CONNID_UNKNOWN);

	return ucm->nb == cm->nb && ucm->conn_id ==
			cm->msg.reset_connidunknown.conn_id_unknown;
}
void __init cor_kgen_init(void)
{
	controlmsg_slab = kmem_cache_create("cor_controlmsg",
			sizeof(struct control_msg_out), 8, 0, 0);
	controlretrans_slab = kmem_cache_create("cor_controlretransmsg",
			sizeof(struct control_retrans), 8, 0, 0);
	htable_init(&retransmits, matches_connretrans,
			offsetof(struct control_retrans, htab_entry),
			offsetof(struct control_retrans, ref));
	htable_init(&unknown_connids, matches_unknownconnid,
			offsetof(struct control_msg_out,
			msg.reset_connidunknown.htab_entry),
			offsetof(struct control_msg_out, ref));
}

MODULE_LICENSE("GPL");