2 * Connection oriented routing
3 * Copyright (C) 2007-2010 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 #include <linux/mutex.h>
25 DEFINE_MUTEX(cor_bindnodes
);
26 DEFINE_SPINLOCK(conn_free
);
28 DEFINE_MUTEX(connid_gen
);
39 struct kmem_cache
*conn_slab
;
41 struct htable connid_table
;
42 struct htable reverse_connid_table
;
46 struct kmem_cache
*bindnode_slab
;
47 struct kmem_cache
*connlistener_slab
;
/*
 * Logarithmic encoding table with 11 steps per octave: entry i
 * (for i >= 1) is approximately 64 * 2^((i-1)/11), entry 0 is 0.
 * Used by enc_log_64_11()/dec_log_64_11() below to pack 32 bit values
 * into a single byte.  Both functions BUG_ON unless the table has
 * exactly 256 entries with log_64_11_table[255] == 571789581.
 * NOTE(review): several rows in this extract appear to be missing
 * their trailing entries (rows have fewer than 11 values) -- confirm
 * against the full source that the table really has 256 entries.
 */
49 /* see cor.h/KP_ACK_CONN */
50 static const __u32 log_64_11_table
[] = {0,
51 64, 68, 73, 77, 82, 88, 93, 99, 106, 113, 120,
52 128, 136, 145, 155, 165, 175, 187, 199, 212, 226, 240,
53 256, 273, 290, 309, 329, 351, 374, 398, 424, 451, 481,
54 512, 545, 581, 619, 659, 702, 747, 796, 848, 903, 961,
55 1024, 1091, 1162, 1237, 1318, 1403, 1495, 1592, 1695, 1805,
57 2048, 2181, 2323, 2474, 2635, 2806, 2989, 3183, 3390, 3611,
59 4096, 4362, 4646, 4948, 5270, 5613, 5978, 6367, 6781, 7222,
61 8192, 8725, 9292, 9897, 10540, 11226, 11956, 12734, 13562,
63 16384, 17450, 18585, 19793, 21081, 22452, 23912, 25467, 27124,
65 32768, 34899, 37169, 39587, 42161, 44904, 47824, 50935, 54248,
67 65536, 69799, 74338, 79173, 84323, 89807, 95648, 101870, 108495,
69 131072, 139597, 148677, 158347, 168646, 179615, 191297, 203739,
70 216991, 231104, 246135,
71 262144, 279194, 297353, 316693, 337291, 359229, 382594, 407478,
72 433981, 462208, 492270,
73 524288, 558388, 594706, 633387, 674583, 718459, 765188, 814957,
74 867962, 924415, 984540,
75 1048576, 1116777, 1189413, 1266774, 1349166, 1436917, 1530376,
76 1629913, 1735924, 1848831, 1969081,
77 2097152, 2233553, 2378826, 2533547, 2698332, 2873834, 3060752,
78 3259826, 3471849, 3697662, 3938162,
79 4194304, 4467106, 4757652, 5067094, 5396664, 5747669, 6121503,
80 6519652, 6943698, 7395323, 7876323,
81 8388608, 8934212, 9515303, 10134189, 10793327, 11495337,
82 12243006, 13039305, 13887396, 14790647,
84 16777216, 17868424, 19030606, 20268378, 21586655, 22990674,
85 24486013, 26078610, 27774791, 29581294,
87 33554432, 35736849, 38061212, 40536755, 43173310, 45981349,
88 48972026, 52157220, 55549582, 59162588,
90 67108864, 71473698, 76122425, 81073510, 86346620, 91962698,
91 97944052, 104314440, 111099165, 118325175,
93 134217728, 142947395, 152244850, 162147020, 172693239,
94 183925396, 195888104, 208628880, 222198329,
96 268435456, 285894791, 304489699, 324294041, 345386479,
97 367850791, 391776208, 417257759, 444396658,
99 536870912, 571789581};
101 __u8
enc_log_64_11(__u32 value
)
104 BUG_ON(log_64_11_table
[255] != 571789581);
105 for (i
=1;i
<256;i
++) {
106 if (log_64_11_table
[i
] > value
)
110 return (__u8
)(i
-1); /* round down */
113 __u32
dec_log_64_11(__u8 value
)
115 BUG_ON(log_64_11_table
[255] != 571789581);
116 return log_64_11_table
[value
];
/*
 * Multiply a * b, saturating instead of wrapping on 64 bit overflow.
 * Overflow is detected by checking whether dividing the product by a
 * yields b again.
 * NOTE(review): the computation of res and the return statements are
 * not visible in this extract.  Also, res / a divides by zero when
 * a == 0 -- presumably callers (multiply_div) guarantee a != 0; TODO
 * confirm against the full source.
 */
119 static inline __u64
mul_saturated(__u64 a
, __u64 b
)
122 if (unlikely(res
/ a
!= b
))
/*
 * Count the significant bits of value (1-based position of the highest
 * set bit; 0 for value == 0) by shifting right until value is 0.
 * NOTE(review): the counter increment and the return statement are not
 * visible in this extract.
 */
127 static inline int numdigits(__u64 value
)
130 for (;value
!= 0;value
= (value
>> 1)) {
136 /* approximate (a*b) / c without overflowing a*b */
137 __u64
multiply_div(__u64 a
, __u64 b
, __u64 c
)
/* bit lengths of the three operands */
139 int alen
= numdigits(a
);
140 int blen
= numdigits(b
);
141 int clen
= numdigits(c
);
/* sanity: numdigits results must be in [0, 64] ... */
143 BUG_ON(alen
< 0 || alen
> 64);
144 BUG_ON(blen
< 0 || blen
> 64);
145 BUG_ON(clen
< 0 || clen
> 64);
/* ... zero iff the operand is zero ... */
147 BUG_ON((a
== 0 && alen
!= 0) || (a
!= 0 && alen
== 0));
148 BUG_ON((b
== 0 && blen
!= 0) || (b
!= 0 && blen
== 0));
149 BUG_ON((c
== 0 && clen
!= 0) || (c
!= 0 && clen
== 0));
/* ... and monotonic w.r.t. operand ordering */
151 BUG_ON(a
>= b
&& alen
< blen
);
152 BUG_ON(a
>= c
&& alen
< clen
);
153 BUG_ON(b
>= a
&& blen
< alen
);
154 BUG_ON(b
>= c
&& blen
< clen
);
155 BUG_ON(c
>= a
&& clen
< alen
);
156 BUG_ON(c
>= b
&& clen
< blen
);
/* a == 0 or b == 0: product is 0 (early return not visible here) */
158 if (alen
== 0 || blen
== 0)
/* a*b fits into 64 bit: exact computation (body not visible here) */
163 if (alen
+ blen
<= 64)
/* one operand is much larger than c: divide that operand first and
 * saturate the remaining multiplication */
166 if (a
>= b
&& alen
> clen
+ 16)
167 return mul_saturated(a
/c
, b
);
168 else if (a
< b
&& blen
> clen
+ 16)
169 return mul_saturated(b
/c
, a
);
/* otherwise drop precision from the larger operand until the product
 * fits into 64 bit; the loop body is not visible in this extract */
171 while (alen
+ blen
> 64) {
172 if (alen
> blen
|| (alen
== blen
&& a
> b
)) {
/*
 * Size of struct cell_hdr rounded up to pointer-size alignment, so the
 * element slots following the header in each cell are aligned.
 * NOTE(review): the closing "* sizeof(void *))" of the expression is
 * not visible in this extract.
 */
186 static inline int hdr_size(void)
188 return ((sizeof(struct cell_hdr
) + sizeof(void *) - 1) / sizeof(void *)
/*
 * Number of element slots (pointers) that fit into one hashtable cell
 * after the alignment-padded cell header.
 */
static inline int elements_per_cell(int cell_size)
{
	return (cell_size - hdr_size()) / sizeof(void *);
}
197 static inline struct cell_hdr
*cell_addr(struct htable
*ht
, __u32 id
)
199 int idx
= (id
%ht
->htable_size
) / (elements_per_cell(ht
->cell_size
));
200 return (struct cell_hdr
*) (((char *)ht
->htable
) + ht
->cell_size
* idx
);
203 static inline char **element_addr(struct htable
*ht
, __u32 id
)
205 int idx
= (id
%ht
->htable_size
) % (elements_per_cell(ht
->cell_size
));
207 ( ((char *)cell_addr(ht
, id
)) +
208 hdr_size() + idx
*sizeof(void *) );
212 static inline char **next_element(struct htable
*ht
, char *element
)
214 return (char **)(element
+ ht
->entry_offset
);
217 static inline struct kref
*element_kref(struct htable
*ht
, char *element
)
219 return (struct kref
*)(element
+ ht
->kref_offset
);
223 static inline void unlock_element(struct htable
*ht
, __u32 key
)
225 struct cell_hdr
*hdr
= cell_addr(ht
, key
);
226 spin_unlock( &(hdr
->lock
) );
/*
 * Find the slot for key: starting at the key's slot, follow the entry
 * chain and return the address of the pointer that either references
 * the entry matching searcheditem, or (if searcheditem == 0 or there
 * is no match) the empty chain tail.  Returns with the cell spinlock
 * held; the caller must release it via unlock_element().
 * NOTE(review): the chain-walking loop header and the return statement
 * are not visible in this extract.
 */
230 static char **get_element_nounlock(struct htable
*ht
, __u32 key
,
233 struct cell_hdr
*hdr
= cell_addr(ht
, key
);
234 char **element
= element_addr(ht
, key
);
236 BUG_ON(0 == element
);
238 spin_lock( &(hdr
->lock
) );
243 if (searcheditem
!= 0 && ht
->matches(*element
, searcheditem
))
245 element
= next_element(ht
, *element
);
/*
 * Look up the entry for key/searcheditem.  On success, a reference is
 * taken on the entry's kref while still holding the cell lock; the
 * caller owns that reference and must kref_put it.  Interrupts are
 * disabled around the cell lock (entries may be touched from irq
 * context).
 * NOTE(review): the declaration of element, the early return for an
 * uninitialized table and the final return are not visible here.
 */
251 char *htable_get(struct htable
*ht
, __u32 key
, void *searcheditem
)
253 unsigned long iflags
;
256 if (unlikely(ht
->htable
== 0))
259 local_irq_save(iflags
);
260 element
= *(get_element_nounlock(ht
, key
, searcheditem
));
/* take the ref before dropping the cell lock */
261 if (likely(element
!= 0))
262 kref_get(element_kref(ht
, element
));
263 unlock_element(ht
, key
);
264 local_irq_restore(iflags
);
/*
 * Remove the entry matching key/searcheditem from the table, dropping
 * the table's reference on it through the supplied free callback.
 * Returns nonzero when the key is not found (based on the visible
 * "key not in table" branch).
 * NOTE(review): the declarations of element/next, the unlinking
 * assignment (*element = *next) and the return statements are not
 * visible in this extract.
 */
269 int htable_delete(struct htable
*ht
, __u32 key
,
270 void *searcheditem
, void (*free
) (struct kref
*ref
))
272 unsigned long iflags
;
277 if (unlikely(ht
->htable
== 0))
280 local_irq_save(iflags
);
282 element
= get_element_nounlock(ht
, key
, searcheditem
);
283 BUG_ON(0 == element
);
285 if (unlikely(*element
== 0)) {
286 /* key not in table */
/* unlink the entry and drop the table's reference */
291 next
= next_element(ht
, *element
);
292 kref_put(element_kref(ht
, *element
), free
);
296 unlock_element(ht
, key
);
297 local_irq_restore(iflags
);
/*
 * Insert newelement under key and take a table reference on it.  The
 * element's own chain pointer must still be 0 (it may not already be
 * in a table).  get_element_nounlock(..., 0) walks to the chain tail,
 * so the new element is appended at the end; BUG_ON(*element != 0)
 * asserts that the returned slot really is the empty tail.
 * NOTE(review): the declaration of element and the early return for an
 * uninitialized table are not visible in this extract.
 */
302 void htable_insert(struct htable
*ht
, char *newelement
, __u32 key
)
304 unsigned long iflags
;
307 if (unlikely(ht
->htable
== 0))
310 BUG_ON(*next_element(ht
, newelement
) != 0);
311 local_irq_save(iflags
);
313 element
= get_element_nounlock(ht
, key
, 0);
315 BUG_ON(element
== 0);
316 BUG_ON(*element
!= 0);
318 *element
= newelement
;
/* the table now holds a reference on the element */
319 kref_get(element_kref(ht
, newelement
));
321 unlock_element(ht
, key
);
322 local_irq_restore(iflags
);
/*
 * Initialize a hashtable: allocate one page for the cell array, zero
 * it, initialize every cell's spinlock and record the match callback
 * plus the offsets of the chain pointer and the kref within the
 * entries.
 * NOTE(review): the assignment of ht->cell_size and the declarations
 * of num_cells/j are not visible in this extract; on allocation
 * failure only the printk is visible (no visible error return).
 */
326 void htable_init(struct htable
*ht
, int (*matches
)(void *htentry
,
327 void *searcheditem
), __u32 entry_offset
, __u32 kref_offset
)
334 ht
->htable
= kmalloc(PAGE_SIZE
, GFP_KERNEL
);
335 if (unlikely(ht
->htable
== 0)) {
336 printk(KERN_CRIT
"cor: error allocating htable (out of "
340 memset(ht
->htable
, 0, PAGE_SIZE
);
343 num_cells
= PAGE_SIZE
/ht
->cell_size
;
/* initialize the per-cell locks */
345 for (j
=0;j
<num_cells
;j
++) {
346 struct cell_hdr
*hdr
= (struct cell_hdr
*)
347 ( ((char *) ht
->htable
) + j
* ht
->cell_size
);
348 spin_lock_init(&(hdr
->lock
));
351 ht
->htable_size
= num_cells
* elements_per_cell(ht
->cell_size
);
352 ht
->num_elements
= 0;
354 ht
->matches
= matches
;
355 ht
->entry_offset
= entry_offset
;
356 ht
->kref_offset
= kref_offset
;
/*
 * Search key for reverse_connid_table lookups.
 * NOTE(review): the members are not visible in this extract; judging
 * from the users below they are struct neighbor *nb and __u32 conn_id.
 */
359 struct reverse_connid_matchparam
{
364 static __u32
rcm_to_key(struct reverse_connid_matchparam
*rcm
)
366 return (((__u32
) rcm
->nb
) ^ rcm
->conn_id
);
369 static int matches_reverse_connid(void *htentry
, void *searcheditem
)
371 struct conn
*conn
= (struct conn
*) htentry
;
372 struct reverse_connid_matchparam
*rcm
=
373 (struct reverse_connid_matchparam
*) searcheditem
;
374 BUG_ON(conn
->targettype
!= TARGET_OUT
);
375 return (conn
->target
.out
.nb
== rcm
->nb
) &&
376 (conn
->target
.out
.conn_id
== rcm
->conn_id
);
379 struct conn
*get_conn_reverse(struct neighbor
*nb
, __u32 conn_id
)
381 struct reverse_connid_matchparam rcm
;
383 rcm
.conn_id
= conn_id
;
385 return (struct conn
*) htable_get(&reverse_connid_table
,
386 rcm_to_key(&rcm
), &rcm
);
389 void insert_reverse_connid(struct conn
*rconn
)
391 struct reverse_connid_matchparam rcm
;
393 BUG_ON(rconn
->targettype
!= TARGET_OUT
);
395 rcm
.nb
= rconn
->target
.out
.nb
;
396 rcm
.conn_id
= rconn
->target
.out
.conn_id
;
397 htable_insert(&reverse_connid_table
, (char *) rconn
, rcm_to_key(&rcm
));
400 struct conn
*get_conn(__u32 conn_id
)
402 return (struct conn
*) htable_get(&connid_table
, conn_id
, &conn_id
);
/*
 * Pick a random, nonzero, currently unused connection id for an
 * incoming conn, store it in sconn->source.in.conn_id and insert the
 * conn into connid_table.  The connid_gen mutex makes the uniqueness
 * check and the insert atomic.  Nonzero return means allocation
 * failed (see conn_init_out).
 * NOTE(review): the retry loop, the declarations of conn_id/tmp and
 * the return statements are not visible in this extract.
 */
405 static int connid_alloc(struct conn
*sconn
)
410 BUG_ON(sconn
->sourcetype
!= SOURCE_IN
);
412 mutex_lock(&connid_gen
);
/* draw a random candidate id */
417 get_random_bytes((char *) &conn_id
, sizeof(conn_id
));
419 if (unlikely(conn_id
== 0))
/* id already in use: drop the lookup reference and (presumably) retry */
422 tmp
= get_conn(conn_id
);
423 if (unlikely(tmp
!= 0)) {
424 kref_put(&(tmp
->ref
), free_conn
);
430 mutex_unlock(&connid_gen
);
435 sconn
->source
.in
.conn_id
= conn_id
;
436 htable_insert(&connid_table
, (char *) sconn
, conn_id
);
437 mutex_unlock(&connid_gen
);
/*
 * kref release function for struct conn.  Conns are freed in pairs:
 * releasing one direction marks its reversedir as isreset == 3, and
 * only when both sides are fully reset is the pair unlinked, the
 * neighbor references dropped and the memory returned to the slab.
 * The conn_free spinlock serializes concurrent releases of the two
 * directions.
 * NOTE(review): several control-flow lines (returns/gotos and closing
 * braces of the if blocks) are not visible in this extract.
 */
441 void free_conn(struct kref
*ref
)
443 unsigned long iflags
;
444 struct conn
*conn
= container_of(ref
, struct conn
, ref
);
445 struct conn
*reversedir
= 0;
447 BUG_ON(atomic_read(&(conn
->isreset
)) == 0);
449 spin_lock_irqsave(&conn_free
, iflags
);
/* make sure the peer direction can be freed as well */
451 if (conn
->reversedir
!= 0)
452 atomic_set(&(conn
->reversedir
->isreset
), 3);
454 if (atomic_read(&(conn
->isreset
)) != 3)
/* detach the pair so the peer does not free us again */
457 if (conn
->reversedir
!= 0) {
458 conn
->reversedir
->reversedir
= 0;
459 reversedir
= conn
->reversedir
;
460 conn
->reversedir
= 0;
/* drop the neighbor reference held by the source side */
463 if (conn
->sourcetype
== SOURCE_IN
) {
464 kref_put(&(conn
->source
.in
.nb
->ref
), neighbor_free
);
465 conn
->source
.in
.nb
= 0;
/* drop the neighbor reference held by the target side */
468 if (conn
->targettype
== TARGET_OUT
) {
469 kref_put(&(conn
->target
.out
.nb
->ref
), neighbor_free
);
470 conn
->target
.out
.nb
= 0;
473 BUG_ON(conn
->data_buf
.totalsize
!= 0);
474 BUG_ON(conn
->data_buf
.overhead
!= 0);
476 kmem_cache_free(conn_slab
, conn
);
479 spin_unlock_irqrestore(&conn_free
, iflags
);
/* free the detached peer as well; NOTE(review): presumably guarded by
 * if (reversedir != 0) -- the guard is not visible in this extract */
482 free_conn(&(reversedir
->ref
));
486 * rconn ==> the connection we received the command from
487 * ==> init rconn->target.out + rconn->reversedir->source.in
489 * rc == 1 ==> connid allocation failed
491 * NOTE: call to this func *must* be protected by rcv_lock
/*
 * Initialize the outgoing half of a conn pair after a connect request:
 * rconn becomes TARGET_OUT towards nb and its reversedir (sconn)
 * becomes SOURCE_IN from nb.  Both directions take a neighbor
 * reference, sconn gets a freshly allocated connection id plus a
 * random initial sequence number, and both conns are linked into the
 * neighbor's send/receive lists.  Returns nonzero (rc == 1) when
 * connid allocation failed -- see the comment block above.
 * NOTE: callers must hold rconn's rcv_lock (see comment above); this
 * function additionally takes sconn's rcv_lock.
 */
493 int conn_init_out(struct conn
*rconn
, struct neighbor
*nb
)
496 struct conn
*sconn
= rconn
->reversedir
;
498 mutex_lock(&(sconn
->rcv_lock
));
500 BUG_ON(rconn
->targettype
!= TARGET_UNCONNECTED
);
502 BUG_ON(sconn
->sourcetype
!= SOURCE_NONE
);
504 memset(&(rconn
->target
.out
), 0, sizeof(rconn
->target
.out
));
505 memset(&(sconn
->source
.in
), 0, sizeof(sconn
->source
.in
));
507 rconn
->targettype
= TARGET_OUT
;
508 sconn
->sourcetype
= SOURCE_IN
;
510 rconn
->target
.out
.nb
= nb
;
511 sconn
->source
.in
.nb
= nb
;
513 /* neighbor pointer */
514 kref_get(&(nb
->ref
));
515 kref_get(&(nb
->ref
));
517 skb_queue_head_init(&(sconn
->source
.in
.reorder_queue
));
518 atomic_set(&(sconn
->source
.in
.pong_awaiting
), 0);
/* may fail; the error path is not visible in this extract */
520 if (unlikely(connid_alloc(sconn
))) {
525 INIT_LIST_HEAD(&(rconn
->target
.out
.retrans_list
));
527 reset_seqno(rconn
, 0);
/* start at a random sequence number */
528 get_random_bytes((char *) &(sconn
->source
.in
.next_seqno
),
529 sizeof(sconn
->source
.in
.next_seqno
));
531 mutex_lock(&(nb
->conn_list_lock
));
532 list_add_tail(&(sconn
->source
.in
.nb_list
), &(nb
->rcv_conn_list
));
533 list_add_tail(&(rconn
->target
.out
.nb_list
), &(nb
->snd_conn_list
));
534 nb
->num_send_conns
++;
535 mutex_unlock(&(nb
->conn_list_lock
));
/* extra refs for active conns; dropped again in reset_conn */
538 kref_get(&(rconn
->ref
));
539 kref_get(&(sconn
->ref
));
541 atomic_inc(&num_conns
);
544 mutex_unlock(&(sconn
->rcv_lock
));
546 #warning todo initial send credits
551 void conn_init_sock_source(struct conn
*conn
)
554 conn
->sourcetype
= SOURCE_SOCK
;
555 memset(&(conn
->source
.sock
), 0, sizeof(conn
->source
.sock
));
556 init_waitqueue_head(&(conn
->source
.sock
.wait
));
559 void conn_init_sock_target(struct conn
*conn
)
562 conn
->targettype
= TARGET_SOCK
;
563 memset(&(conn
->target
.sock
), 0, sizeof(conn
->target
.sock
));
564 init_waitqueue_head(&(conn
->target
.sock
.wait
));
565 reset_seqno(conn
, 0);
/*
 * Allocate and initialize a conn pair (rconn/sconn referencing each
 * other through reversedir).  Both start unconnected (SOURCE_NONE /
 * TARGET_UNCONNECTED) with one kref each and a forwarding credit rate
 * of 90% of the maximum.
 * NOTE(review): the return statements and parts of the error path are
 * not visible in this extract.
 */
568 struct conn
* alloc_conn(gfp_t allocflags
)
570 struct conn
*rconn
= 0;
571 struct conn
*sconn
= 0;
573 rconn
= kmem_cache_alloc(conn_slab
, allocflags
);
574 if (unlikely(rconn
== 0))
577 sconn
= kmem_cache_alloc(conn_slab
, allocflags
);
578 if (unlikely(sconn
== 0))
581 memset(rconn
, 0, sizeof(struct conn
));
582 memset(sconn
, 0, sizeof(struct conn
));
/* link the two directions to each other */
584 rconn
->reversedir
= sconn
;
585 sconn
->reversedir
= rconn
;
587 kref_init(&(rconn
->ref
));
588 kref_init(&(sconn
->ref
));
590 rconn
->sockstate
= SOCKSTATE_CONN
;
591 sconn
->sockstate
= SOCKSTATE_CONN
;
593 rconn
->sourcetype
= SOURCE_NONE
;
594 sconn
->sourcetype
= SOURCE_NONE
;
595 rconn
->targettype
= TARGET_UNCONNECTED
;
596 sconn
->targettype
= TARGET_UNCONNECTED
;
598 atomic_set(&(rconn
->isreset
), 0);
599 atomic_set(&(sconn
->isreset
), 0);
601 mutex_init(&(rconn
->rcv_lock
));
602 mutex_init(&(sconn
->rcv_lock
));
604 rconn
->jiffies_credit_update
= jiffies
;
/* NOTE(review): self-assignment -- presumably this was meant to copy
 * rconn->jiffies_credit_update into sconn; confirm against upstream */
605 rconn
->jiffies_credit_update
= rconn
->jiffies_credit_update
;
/* 90% forward credit rate.  NOTE(review): the plain (1 << 31) shifts
 * into the sign bit of int (undefined behavior); presumably it should
 * be (((__u32) 1) << 31) like the second operand -- confirm */
607 rconn
->crate_forward
= ((1 << 31) - (((__u32
) 1 << 31) / 10));
608 sconn
->crate_forward
= ((1 << 31) - (((__u32
) 1 << 31) / 10));
/* error path: sconn allocation failed, free rconn again */
616 kmem_cache_free(conn_slab
, rconn
);
/*
 * Walk the openports list and return the listener bound to port, or
 * (presumably) 0 when the port is not bound.  Callers must hold the
 * cor_bindnodes mutex.
 * NOTE(review): the list-advance statement and the final return are
 * not visible in this extract.
 */
621 static struct connlistener
*get_connlistener(__be64 port
)
623 struct list_head
*curr
= openports
.next
;
625 while (curr
!= &openports
) {
/* recover the bindnode from its embedded list head */
626 struct bindnode
*currnode
= ((struct bindnode
*)
627 (((char *)curr
) - offsetof(struct bindnode
, lh
)));
628 if (currnode
->port
== port
) {
629 BUG_ON(currnode
->owner
== 0);
630 return currnode
->owner
;
/*
 * Tear down a listener: unlink and free its bindnode, mark every
 * queued-but-unaccepted conn as reset and drop its reference, then
 * free the listener itself.  Everything runs under the cor_bindnodes
 * mutex.
 */
639 void close_port(struct connlistener
*listener
)
641 mutex_lock(&cor_bindnodes
);
643 if (listener
->bn
!= 0) {
644 list_del(&(listener
->bn
->lh
));
645 kmem_cache_free(bindnode_slab
, listener
->bn
);
/* drain the accept queue */
649 while (list_empty(&(listener
->conn_queue
)) == 0) {
650 struct conn
*rconn
= container_of(listener
->conn_queue
.next
,
651 struct conn
, source
.sock
.cl_list
);
652 list_del(&(rconn
->source
.sock
.cl_list
));
/* mark the peer direction as reset (only if not already) */
653 atomic_cmpxchg(&(rconn
->reversedir
->isreset
), 0, 1);
655 kref_put(&(rconn
->ref
), free_conn
);
658 kmem_cache_free(connlistener_slab
, listener
);
660 mutex_unlock(&cor_bindnodes
);
/*
 * Bind port: allocate a bindnode and a listener, link them together
 * and add the bindnode to openports.  Fails when the port is already
 * bound.  Runs under the cor_bindnodes mutex.
 * NOTE(review): the NULL checks after the two allocations, the queue
 * maxlen initialization and the return statements are not visible in
 * this extract -- confirm they exist in the full source.
 */
663 struct connlistener
*open_port(__be64 port
)
666 struct bindnode
*bn
= 0;
667 struct connlistener
*listener
= 0;
669 mutex_lock(&cor_bindnodes
);
670 if (get_connlistener(port
) != 0)
674 bn
= kmem_cache_alloc(bindnode_slab
, GFP_KERNEL
);
675 listener
= kmem_cache_alloc(connlistener_slab
, GFP_KERNEL
);
677 memset(bn
, 0, sizeof(struct bindnode
));
678 memset(listener
, 0, sizeof(struct connlistener
));
680 bn
->owner
= listener
;
683 /* kref is not actually used */
684 listener
->sockstate
= SOCKSTATE_LISTENER
;
686 mutex_init(&(listener
->lock
));
687 INIT_LIST_HEAD(&(listener
->conn_queue
));
688 init_waitqueue_head(&(listener
->wait
));
690 list_add_tail((struct list_head
*) &(bn
->lh
), &openports
);
693 mutex_unlock(&cor_bindnodes
);
700 * rc == 2 ==> port not open
701 * rc == 3 ==> listener queue full
/*
 * Connect rconn to a locally bound port: initialize both directions as
 * socket conns and append the reverse direction to the listener's
 * accept queue, waking any accept() waiter.  Returns nonzero when the
 * port is not open or the queue is full (see the rc codes documented
 * directly above this function).
 * NOTE(review): the rc declaration, the not-found branch after the
 * lookup and the return statements are not visible in this extract.
 */
703 int connect_port(struct conn
*rconn
, __be64 port
)
706 struct connlistener
*listener
;
709 mutex_lock(&cor_bindnodes
);
711 listener
= get_connlistener(port
);
717 mutex_lock(&(listener
->lock
));
/* queue full; queue_maxlen <= 0 marks a dead/unlimited-rejecting
 * listener -- the branch bodies are not visible in this extract */
719 if (unlikely(listener
->queue_len
>= listener
->queue_maxlen
)) {
720 if (listener
->queue_maxlen
<= 0)
/* the listener queue will hold a reference on the reverse conn */
728 kref_get(&(rconn
->reversedir
->ref
));
730 mutex_lock(&(rconn
->rcv_lock
));
731 mutex_lock(&(rconn
->reversedir
->rcv_lock
));
732 conn_init_sock_target(rconn
);
733 conn_init_sock_source(rconn
->reversedir
);
734 mutex_unlock(&(rconn
->reversedir
->rcv_lock
));
735 mutex_unlock(&(rconn
->rcv_lock
));
737 list_add_tail(&(rconn
->reversedir
->source
.sock
.cl_list
),
738 &(listener
->conn_queue
));
739 listener
->queue_len
++;
/* notify a sleeping accept() */
740 wake_up_interruptible(&(listener
->wait
));
743 mutex_unlock(&(listener
->lock
));
746 mutex_unlock(&cor_bindnodes
);
752 * rc == 2 ==> addrtype not found
753 * rc == 3 ==> addr not found
754 * rc == 4 ==> connid allocation failed
755 * rc == 5 ==> control msg alloc failed
/*
 * Connect rconn to a remote neighbor identified by (addrtype, addr):
 * resolve the neighbor, initialize the outgoing conn pair via
 * conn_init_out() and send the connect control message carrying the
 * reverse direction's connection id and initial sequence number.
 * Returns the rc codes documented directly above this function.
 * NOTE(review): the NULL check on nb after find_neigh, the error
 * branch bodies, the remaining send_connect_nb arguments and the
 * return statements are not visible in this extract.
 */
757 int connect_neigh(struct conn
*rconn
,
758 __u16 addrtypelen
, __u8
*addrtype
,
759 __u16 addrlen
, __u8
*addr
)
762 struct control_msg_out
*cm
;
763 struct neighbor
*nb
= find_neigh(addrtypelen
, addrtype
, addrlen
, addr
);
/* rc == 4: connid allocation failed inside conn_init_out */
767 if (unlikely(conn_init_out(rconn
, nb
))) {
772 cm
= alloc_control_msg(nb
, ACM_PRIORITY_HIGH
);
/* rc == 5: control message allocation failed */
773 if (unlikely(cm
== 0)) {
778 send_connect_nb(cm
, rconn
->reversedir
->source
.in
.conn_id
,
779 rconn
->reversedir
->source
.in
.next_seqno
,
/* drop the lookup reference from find_neigh */
783 kref_put(&(nb
->ref
), neighbor_free
);
/*
 * If this incoming conn still owes its neighbor a pong, clear the
 * pong_awaiting flag and decrement the neighbor's expected-pong
 * counter.  The flag is re-checked after taking conn_list_lock so a
 * concurrent reset cannot cause a double decrement.
 * NOTE(review): the body of the re-check branch (presumably a goto/
 * early unlock) is not visible in this extract.
 */
788 void reset_ping(struct conn
*rconn
)
790 struct neighbor
*nb
= rconn
->source
.in
.nb
;
/* cheap unlocked pre-check */
791 if (atomic_read(&(rconn
->source
.in
.pong_awaiting
)) != 0) {
792 mutex_lock(&(nb
->conn_list_lock
));
/* re-check under the lock */
793 if (atomic_read(&(rconn
->source
.in
.pong_awaiting
)) == 0)
796 atomic_set(&(rconn
->source
.in
.pong_awaiting
), 0);
797 nb
->pong_conns_expected
--;
799 mutex_unlock(&(nb
->conn_list_lock
));
/*
 * Reset one direction of a conn: advance isreset (0/1 -> 2), unlink
 * the conn from its neighbor's lists and from the connid hashtables,
 * send a reset to the peer when the reset originates locally, wake any
 * socket waiters, release buffers/credits, and give back the socket
 * buffer allocation.  The return value tells reset_conn() whether an
 * extra reference must be dropped.
 * NOTE(review): many control-flow lines (returns, else-branches and
 * several closing braces) are not visible in this extract; the
 * comments below describe only the visible logic.
 */
803 static int _reset_conn(struct conn
*conn
)
806 * aktive conns have an additional ref to make sure that they are not
807 * freed when only one direction is referenced by the connid hashtable
/* claim the reset: only the first caller proceeds past here */
810 int isreset
= atomic_cmpxchg(&(conn
->isreset
), 0, 2);
812 isreset
= atomic_cmpxchg(&(conn
->isreset
), 1, 2);
/* already reset (2) or already being freed (3): nothing to do */
814 if (isreset
== 2 || isreset
== 3)
817 /* lock sourcetype/targettype */
818 mutex_lock(&(conn
->rcv_lock
));
820 if (conn
->sourcetype
== SOURCE_IN
) {
/* unlink from the neighbor's receive list */
821 mutex_lock(&(conn
->source
.in
.nb
->conn_list_lock
));
822 list_del(&(conn
->source
.in
.nb_list
));
823 mutex_unlock(&(conn
->source
.in
.nb
->conn_list_lock
));
/* remove the incoming connection id from connid_table */
827 if (conn
->source
.in
.conn_id
!= 0) {
828 if (htable_delete(&connid_table
,
829 conn
->source
.in
.conn_id
,
830 &(conn
->source
.in
.conn_id
), free_conn
)){
831 printk(KERN_ERR
"error in _reset_conn: "
832 "htable_delete src_in failed");
834 conn
->source
.in
.conn_id
= 0;
839 atomic_dec(&num_conns
);
840 BUG_ON(atomic_read(&num_conns
) < 0);
841 } else if (conn
->sourcetype
== SOURCE_SOCK
) {
/* wake a blocked writer so it notices the reset */
842 wake_up_interruptible(&(conn
->source
.sock
.wait
));
845 if (conn
->targettype
== TARGET_UNCONNECTED
) {
846 connreset_cpacket_buffer(conn
);
847 } else if (conn
->targettype
== TARGET_OUT
) {
/* unlink from the neighbor's send list */
848 mutex_lock(&(conn
->target
.out
.nb
->conn_list_lock
));
849 list_del(&(conn
->target
.out
.nb_list
));
850 conn
->target
.out
.nb
->num_send_conns
--;
851 BUG_ON(conn
->target
.out
.nb
->num_send_conns
< 0);
852 mutex_unlock(&(conn
->target
.out
.nb
->conn_list_lock
));
/* remove the outgoing id from reverse_connid_table */
856 if (conn
->target
.out
.conn_id
!= 0) {
857 struct reverse_connid_matchparam rcm
;
858 rcm
.nb
= conn
->target
.out
.nb
;
859 rcm
.conn_id
= conn
->target
.out
.conn_id
;
860 if (htable_delete(&reverse_connid_table
,
863 printk(KERN_ERR
"error in _reset_conn: "
864 "htable_delete target_out "
/* locally originated reset (isreset was 0): tell the peer; fall back
 * to pinging all conns when no control message can be allocated */
869 if (isreset
== 0 && conn
->target
.out
.conn_id
!= 0) {
870 struct control_msg_out
*cm
= alloc_control_msg(
871 conn
->target
.out
.nb
, ACM_PRIORITY_HIGH
);
872 if (unlikely(cm
== 0))
873 send_ping_all_conns(conn
->target
.out
.nb
);
875 send_reset_conn(cm
, conn
->target
.out
.conn_id
);
878 conn
->target
.out
.conn_id
= 0;
880 cancel_retrans(conn
);
881 } else if (conn
->targettype
== TARGET_SOCK
) {
/* wake a blocked reader so it notices the reset */
882 wake_up_interruptible(&(conn
->target
.sock
.wait
));
887 mutex_unlock(&(conn
->rcv_lock
));
/* release queued data, reservations and credits */
889 qos_remove_conn(conn
);
890 reset_bufferusage(conn
);
891 unreserve_sock_buffer(conn
);
892 connreset_credits(conn
);
/* the buffer-limit bookkeeping below only applies to socket sources;
 * re-take the lock in the right order w.r.t. sock_bufferlimits_lock */
894 mutex_lock(&(conn
->rcv_lock
));
895 if (conn
->sourcetype
!= SOURCE_SOCK
) {
896 mutex_unlock(&(conn
->rcv_lock
));
897 goto skipbufferlimits
;
899 mutex_unlock(&(conn
->rcv_lock
));
901 mutex_lock(&sock_bufferlimits_lock
);
902 mutex_lock(&(conn
->rcv_lock
));
/* sourcetype re-checked: it may have changed while unlocked */
903 if (conn
->sourcetype
== SOURCE_SOCK
) {
904 BUG_ON(conn
->source
.sock
.in_alwait_list
);
/* give the allocation back to the buffer tracker */
905 conn
->source
.sock
.sbt
->usage
-= conn
->source
.sock
.alloclimit
;
906 if (conn
->source
.sock
.delay_flush
) {
907 conn
->source
.sock
.delay_flush
= 0;
908 list_del(&(conn
->source
.sock
.delflush_list
));
910 kref_put(&(conn
->source
.sock
.sbt
->ref
), free_sbt
);
911 conn
->source
.sock
.sbt
= 0;
913 mutex_unlock(&(conn
->rcv_lock
));
914 mutex_unlock(&sock_bufferlimits_lock
);
920 /* warning: do not hold the rcv_lock while calling this! */
/*
 * Reset both directions of a conn pair and drop the extra references
 * that active conns hold (one per direction; whether each must be
 * dropped is reported by _reset_conn's return values put1/put2 --
 * the surrounding if statements are not visible in this extract).
 */
921 void reset_conn(struct conn
*conn
)
923 int put1
= _reset_conn(conn
);
924 int put2
= _reset_conn(conn
->reversedir
);
926 /* free_conn may not be called, before both _reset_conn have finished */
928 kref_put(&(conn
->ref
), free_conn
);
933 kref_put(&(conn
->reversedir
->ref
), free_conn
);
938 static int matches_connid_in(void *htentry
, void *searcheditem
)
940 struct conn
*conn
= (struct conn
*) htentry
;
941 __u32 conn_id
= *((__u32
*) searcheditem
);
942 BUG_ON(conn
->sourcetype
!= SOURCE_IN
);
943 return (conn
->source
.in
.conn_id
== conn_id
);
/*
 * Module init: print some struct sizes (debug aid), create the slab
 * caches, set up the two connid hashtables and run the sub-module
 * initializers.
 * NOTE(review): sizeof() results are printed with %d -- they should
 * use %zu; also the declarations of c/rc, the failure checks after
 * kmem_cache_create and the return statements are not visible in this
 * extract.
 */
946 static int __init
cor_common_init(void)
952 printk(KERN_ERR
"sizeof conn: %d", sizeof(c
));
953 printk(KERN_ERR
" conn.source: %d", sizeof(c
.source
));
954 printk(KERN_ERR
" conn.target: %d", sizeof(c
.target
));
955 printk(KERN_ERR
" conn.target.out: %d", sizeof(c
.target
.out
));
956 printk(KERN_ERR
" conn.buf: %d", sizeof(c
.data_buf
));
958 printk(KERN_ERR
" mutex: %d", sizeof(struct mutex
));
959 printk(KERN_ERR
" spinlock: %d", sizeof(spinlock_t
));
960 printk(KERN_ERR
" kref: %d", sizeof(struct kref
));
962 conn_slab
= kmem_cache_create("cor_conn", sizeof(struct conn
), 8, 0, 0);
/* conn_id -> conn, chained through source.in.htab_entry */
963 htable_init(&connid_table
, matches_connid_in
,
964 offsetof(struct conn
, source
.in
.htab_entry
),
965 offsetof(struct conn
, ref
));
/* (nb, conn_id) -> conn, chained through target.out.htab_entry */
967 htable_init(&reverse_connid_table
, matches_reverse_connid
,
968 offsetof(struct conn
, target
.out
.htab_entry
),
969 offsetof(struct conn
, ref
));
971 bindnode_slab
= kmem_cache_create("cor_bindnode",
972 sizeof(struct bindnode
), 8, 0, 0);
973 connlistener_slab
= kmem_cache_create("cor_connlistener",
974 sizeof(struct connlistener
), 8, 0, 0);
976 atomic_set(&num_conns
, 0);
984 if (unlikely(rc
!= 0))
987 rc
= cor_neighbor_init();
988 if (unlikely(rc
!= 0))
992 if (unlikely(rc
!= 0))
/* register the module entry point and license */
998 module_init(cor_common_init
);
999 MODULE_LICENSE("GPL");