/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/mutex.h>

#include "cor.h" /* project-local declarations (struct conn, struct htable, ...) */
DEFINE_MUTEX(cor_bindnodes);
DEFINE_MUTEX(conn_free);

DEFINE_MUTEX(connid_gen);

LIST_HEAD(openports); /* bound ports, walked by get_connlistener() below */
struct kmem_cache *conn_slab;

struct htable connid_table;

struct kmem_cache *bindnode_slab;
struct kmem_cache *connlistener_slab;
void ref_counter_decr(struct ref_counter *cnt)
{
	unsigned long iflags;

	BUG_ON(0 == cnt->def);

	spin_lock_irqsave(&(cnt->lock), iflags);
	cnt->refs--;
	if (unlikely(cnt->refs == 0)) {
		spin_unlock(&(cnt->lock));
		local_irq_restore(iflags);
		cnt->def->free(cnt);
		return;
	}
	spin_unlock_irqrestore(&(cnt->lock), iflags);
}
int ref_counter_incr(struct ref_counter *cnt)
{
	unsigned long iflags;

	spin_lock_irqsave(&(cnt->lock), iflags);
	cnt->refs++;
	spin_unlock_irqrestore(&(cnt->lock), iflags);

	return 0;
}
void ref_counter_init(struct ref_counter *cnt, struct ref_counter_def *def)
{
	BUG_ON(0 == cnt);
	BUG_ON(0 == def);

	spin_lock_init(&(cnt->lock));
	cnt->refs = 1; /* the caller owns the initial reference */
	cnt->def = def;
}
static inline int hdr_size(void)
{
	return ((sizeof(struct cell_hdr) + sizeof(void *) - 1) /
			sizeof(void *)) * sizeof(void *);
}
static inline int elements_per_cell(int cell_size)
{
	return (cell_size - hdr_size())/sizeof(void *);
}
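/*
 * Layout of the hash table: ht->htable is a single block split into
 * fixed-size cells. Every cell starts with a struct cell_hdr (carrying
 * the per-cell spinlock), rounded up to pointer alignment by hdr_size(),
 * followed by elements_per_cell() pointer-sized bucket slots. Entries
 * hashing to the same slot are chained through the pointer stored at
 * ht->entry_offset inside each entry.
 */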
static inline struct cell_hdr *cell_addr(struct htable *ht, __u32 id)
{
	int idx = (id%ht->htable_size) / (elements_per_cell(ht->cell_size));
	return (struct cell_hdr *) (((char *)ht->htable) + ht->cell_size * idx);
}
static inline char **element_addr(struct htable *ht, __u32 id)
{
	int idx = (id%ht->htable_size) % (elements_per_cell(ht->cell_size));
	return (char **)
			( ((char *)cell_addr(ht, id)) +
			hdr_size() + idx*sizeof(void *) );
}
static inline char **next_element(struct htable *ht, char *element)
{
	return (char **)(element + ht->entry_offset);
}
static inline struct ref_counter *element_refcnt(struct htable *ht,
		char *element)
{
	return (struct ref_counter *)(element + ht->ref_counter_offset);
}
static inline void unlock_element(struct htable *ht, __u32 key)
{
	struct cell_hdr *hdr = cell_addr(ht, key);
	spin_unlock( &(hdr->lock) );
}
/* returns the slot holding the matching element, or the empty slot at the
 * end of the chain if there is no match; the cell stays locked */
static char **get_element_nounlock(struct htable *ht, __u32 key,
		void *searcheditem)
{
	struct cell_hdr *hdr = cell_addr(ht, key);
	char **element = element_addr(ht, key);

	BUG_ON(0 == element);

	spin_lock( &(hdr->lock) );

	while (*element != 0) {
		if (searcheditem != 0 && ht->matches(*element, searcheditem))
			break;
		element = next_element(ht, *element);
	}

	return element;
}
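/* looks up key, takes a reference on the matching element and returns it;
 * the caller must drop the reference with ref_counter_decr() */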
char *htable_get(struct htable *ht, __u32 key, void *searcheditem)
{
	unsigned long iflags;
	char *element;

	if (unlikely(ht->htable == 0))
		return 0;

	printk(KERN_ERR "get %d", key);

	local_irq_save(iflags);
	element = *(get_element_nounlock(ht, key, searcheditem));
	if (likely(element != 0))
		ref_counter_incr(element_refcnt(ht, element));
	unlock_element(ht, key);
	local_irq_restore(iflags);

	return element;
}
int htable_delete(struct htable *ht, __u32 key,
		void *searcheditem)
{
	unsigned long iflags;
	char **element;
	char **next;
	char *deleted;
	int rc = 0;

	if (unlikely(ht->htable == 0))
		return 1;

	printk(KERN_ERR "des %d", key);

	local_irq_save(iflags);

	element = get_element_nounlock(ht, key, searcheditem);
	BUG_ON(0 == element);

	if (unlikely(*element == 0)) {
		/* key not in table */
		rc = 1;
		goto out;
	}

	/* unlink before dropping the reference, since the drop may free the
	 * element */
	next = next_element(ht, *element);
	deleted = *element;
	*element = *next;
	*next = 0;
	ref_counter_decr(element_refcnt(ht, deleted));

out:
	unlock_element(ht, key);
	local_irq_restore(iflags);

	return rc;
}
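/* the table takes its own reference on newelement via its embedded
 * ref_counter; the entry must not already be chained into a table */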
void htable_insert(struct htable *ht, char *newelement, __u32 key)
{
	unsigned long iflags;
	char **element;

	if (unlikely(ht->htable == 0))
		return;

	printk(KERN_ERR "insert %p %d", newelement, key);

	BUG_ON(*next_element(ht, newelement) != 0);
	local_irq_save(iflags);

	/* searcheditem == 0 walks to the empty slot at the chain's end */
	element = get_element_nounlock(ht, key, 0);

	BUG_ON(element == 0);
	BUG_ON(*element != 0);

	*element = newelement;
	ref_counter_incr(element_refcnt(ht, newelement));

	ht->num_elements++;
	unlock_element(ht, key);
	local_irq_restore(iflags);
}
void htable_init(struct htable *ht, int (*matches)(void *htentry,
		void *searcheditem), __u32 entry_offset, __u32 ref_counter_offset)
{
	int num_cells;
	int j;

	BUG_ON(0 == ht);

	ht->htable = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (ht->htable == 0) {
		printk(KERN_CRIT "cor: error allocating htable (out of "
				"memory?)");
		return;
	}
	memset(ht->htable, 0, PAGE_SIZE);

	ht->cell_size = 256;
	num_cells = PAGE_SIZE/ht->cell_size;

	for (j=0;j<num_cells;j++) {
		struct cell_hdr *hdr = (struct cell_hdr *)
				( ((char *) ht->htable) + j * ht->cell_size );
		spin_lock_init(&(hdr->lock));
	}

	ht->htable_size = num_cells * elements_per_cell(ht->cell_size);
	ht->num_elements = 0;

	ht->matches = matches;
	ht->entry_offset = entry_offset;
	ht->ref_counter_offset = ref_counter_offset;
}
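/*
 * Usage sketch (illustrative only, not compiled): an entry type embeds a
 * chain pointer and a struct ref_counter, and htable_init() receives the
 * offsets of both, so the table can chain entries and manage their
 * lifetime without knowing the entry type. connid_table is wired up
 * exactly this way in cor_common_init() below.
 */
#if 0
struct example_entry {
	char *htab_entry;		/* chain pointer used by the htable */
	struct ref_counter refs;	/* lifetime management */
	__u32 id;			/* hash key */
};

static int example_matches(void *htentry, void *searcheditem)
{
	return ((struct example_entry *) htentry)->id ==
			*((__u32 *) searcheditem);
}

static struct htable example_table;

static void example_init(void)
{
	htable_init(&example_table, example_matches,
			offsetof(struct example_entry, htab_entry),
			offsetof(struct example_entry, refs));
}
#endif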
struct conn *get_conn(__u32 conn_id)
{
	return (struct conn *) htable_get(&connid_table, conn_id, &conn_id);
}
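/* picks a random unused connection id; random rather than sequential ids
 * make it harder for other hosts to guess foreign connection ids */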
static int connid_alloc(struct conn *sconn)
{
	__u32 conn_id;
	int i;

	BUG_ON(sconn->sourcetype != SOURCE_IN);

	mutex_lock(&connid_gen);
	/* the retry bound is arbitrary; give up rather than loop forever */
	for (i=0;i<16;i++) {
		struct conn *tmp;

		get_random_bytes((char *) &conn_id, sizeof(conn_id));

		if (conn_id == 0)
			continue;

		tmp = get_conn(conn_id);
		if (tmp == 0)
			goto found;
		ref_counter_decr(&(tmp->refs));
	}
	mutex_unlock(&connid_gen);

	return 1;

found:
	sconn->source.in.conn_id = conn_id;
	htable_insert(&connid_table, (char *) sconn, conn_id);
	mutex_unlock(&connid_gen);

	return 0;
}
static void free_conn(struct ref_counter *cnt)
{
	struct conn *conn = container_of(cnt, struct conn, refs);

	BUG_ON(conn->isreset == 0);

	mutex_lock(&conn_free);

	if (conn->isreset != 3) {
		/* the reverse direction may still be referenced; mark it so
		 * that both conns are freed once it is released too */
		conn->reversedir->isreset = 3;
		goto out;
	}

	if (conn->reversedir != 0) {
		conn->reversedir->reversedir = 0;
		conn->reversedir->isreset = 3;
		/* conn_free is not recursive, so drop it while freeing the
		 * reverse direction */
		mutex_unlock(&conn_free);
		free_conn(&(conn->reversedir->refs));
		mutex_lock(&conn_free);
		conn->reversedir = 0;
	}

	if (conn->sourcetype == SOURCE_IN) {
		ref_counter_decr(&(conn->source.in.nb->refs));
		conn->source.in.nb = 0;
	}

	if (conn->targettype == TARGET_OUT) {
		ref_counter_decr(&(conn->target.out.nb->refs));
		conn->target.out.nb = 0;
	}

	databuf_free(&(conn->buf));

	kmem_cache_free(conn_slab, conn);

out:
	mutex_unlock(&conn_free);
}
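/* release callback used by ref_counter_decr() for both conn directions */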
static struct ref_counter_def conn_refcnt = {
	.free = free_conn
};
/**
 * rconn ==> the connection we received the command from
 *       ==> init rconn->target.out + rconn->reversedir->source.in
 *
 * rc == 1 ==> connid allocation failed
 */
int conn_init_out(struct conn *rconn, struct neighbor *nb)
{
	struct conn *sconn = rconn->reversedir;

	BUG_ON(TARGET_UNCONNECTED != rconn->targettype);
	BUG_ON(0 == sconn);
	BUG_ON(SOURCE_NONE != sconn->sourcetype);

	memset(&(rconn->target.out), 0, sizeof(rconn->target.out));
	memset(&(sconn->source.in), 0, sizeof(sconn->source.in));

	rconn->targettype = TARGET_OUT;
	sconn->sourcetype = SOURCE_IN;

	rconn->target.out.nb = nb;
	sconn->source.in.nb = nb;

	skb_queue_head_init(&(sconn->source.in.reorder_queue));

	/*
	 * connid_alloc has to be called last, because packets may be received
	 * immediately after its execution
	 */
	if (connid_alloc(sconn))
		return 1;

	mutex_lock(&(nb->conn_list_lock));
	list_add_tail(&(sconn->source.in.nb_list), &(nb->rcv_conn_list));
	list_add_tail(&(rconn->target.out.nb_list), &(nb->snd_conn_list));
	mutex_unlock(&(nb->conn_list_lock));

	/* the neighbor's lists hold references on both directions */
	ref_counter_incr(&(rconn->refs));
	ref_counter_incr(&(sconn->refs));

	return 0;
}
void conn_init_sock_source(struct conn *conn)
{
	BUG_ON(conn == 0);

	conn->sourcetype = SOURCE_SOCK;

	memset(&(conn->source.sock), 0, sizeof(conn->source.sock));

	init_waitqueue_head(&(conn->source.sock.wait));
}
void conn_init_sock_target(struct conn *conn)
{
	BUG_ON(conn == 0);

	conn->targettype = TARGET_SOCK;
	init_waitqueue_head(&(conn->target.sock.wait));
}
struct conn *alloc_conn(gfp_t allocflags)
{
	struct conn *rconn = 0;
	struct conn *sconn = 0;

	rconn = kmem_cache_alloc(conn_slab, allocflags);
	if (unlikely(0 == rconn))
		goto out_err0;

	sconn = kmem_cache_alloc(conn_slab, allocflags);
	if (unlikely(0 == sconn))
		goto out_err1;

	memset(rconn, 0, sizeof(struct conn));
	memset(sconn, 0, sizeof(struct conn));

	rconn->reversedir = sconn;
	sconn->reversedir = rconn;

	ref_counter_init(&(rconn->refs), &conn_refcnt);
	ref_counter_init(&(sconn->refs), &conn_refcnt);

	mutex_init(&(rconn->rcv_lock));
	mutex_init(&(sconn->rcv_lock));

	rconn->sockstate = SOCKSTATE_CONN;
	sconn->sockstate = SOCKSTATE_CONN;

	databuf_init(&(rconn->buf));
	databuf_init(&(sconn->buf));

	return rconn;

out_err1:
	kmem_cache_free(conn_slab, rconn);
out_err0:
	return 0;
}
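/*
 * Typical lifecycle: alloc_conn() creates both directions of a
 * connection; conn_init_out() (outgoing via a neighbor) or
 * conn_init_sock_source()/conn_init_sock_target() (local socket)
 * configure them afterwards, and reset_conn() below tears the pair down.
 */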
static struct connlistener *get_connlistener(__be64 port)
{
	struct list_head *curr = openports.next;

	while (curr != &openports) {
		struct bindnode *currnode = ((struct bindnode *)
				(((char *)curr) - offsetof(struct bindnode, lh)));
		if (currnode->port == port) {
			BUG_ON(currnode->owner == 0);
			return currnode->owner;
		}

		curr = curr->next;
	}

	return 0;
}
void close_port(struct connlistener *listener)
{
	mutex_lock(&cor_bindnodes);

	if (listener->bn != 0) {
		list_del(&(listener->bn->lh));
		kmem_cache_free(bindnode_slab, listener->bn);
		listener->bn = 0;
	}

	while (list_empty(&(listener->conn_queue)) == 0) {
		struct conn *rconn = container_of(listener->conn_queue.next,
				struct conn, source.sock.cl_list);
		rconn->reversedir->isreset = 1;

		list_del(&(rconn->source.sock.cl_list));
		ref_counter_decr(&(rconn->refs));
	}

	kmem_cache_free(connlistener_slab, listener);

	mutex_unlock(&cor_bindnodes);
}
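/* binds port and returns the new listener, or 0 if the port is already in
 * use or an allocation failed */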
struct connlistener *open_port(__be64 port)
{
	struct bindnode *bn = 0;
	struct connlistener *listener = 0;

	mutex_lock(&cor_bindnodes);
	if (get_connlistener(port) != 0)
		goto out;

	bn = kmem_cache_alloc(bindnode_slab, GFP_KERNEL);
	listener = kmem_cache_alloc(connlistener_slab, GFP_KERNEL);
	if (bn == 0 || listener == 0)
		goto out_err;

	memset(bn, 0, sizeof(struct bindnode));
	memset(listener, 0, sizeof(struct connlistener));

	bn->port = port;
	bn->owner = listener;
	listener->bn = bn;

	/* refcounter is not actually used */
	listener->sockstate = SOCKSTATE_LISTENER;

	mutex_init(&(listener->lock));
	INIT_LIST_HEAD(&(listener->conn_queue));
	init_waitqueue_head(&(listener->wait));

	list_add_tail((struct list_head *) &(bn->lh), &openports);

	goto out;

out_err:
	if (bn != 0)
		kmem_cache_free(bindnode_slab, bn);
	if (listener != 0)
		kmem_cache_free(connlistener_slab, listener);
	listener = 0;
out:
	mutex_unlock(&cor_bindnodes);

	return listener;
}
/**
 * rc == 2 ==> port not open
 * rc == 3 ==> listener queue full
 */
int connect_port(struct conn *rconn, __be64 port)
{
	struct connlistener *listener;
	int rc = 0;

	mutex_lock(&cor_bindnodes);

	listener = get_connlistener(port);
	if (listener == 0) {
		rc = 2;
		goto out;
	}

	mutex_lock(&(listener->lock));

	if (listener->queue_len >= listener->queue_maxlen) {
		if (listener->queue_maxlen <= 0)
			rc = 2;
		else
			rc = 3;
		goto out2;
	}

	if (ref_counter_incr(&(rconn->reversedir->refs)))
		BUG();

	conn_init_sock_target(rconn);
	conn_init_sock_source(rconn->reversedir);

	list_add_tail(&(rconn->reversedir->source.sock.cl_list),
			&(listener->conn_queue));
	listener->queue_len++;
	wake_up_interruptible(&(listener->wait));

out2:
	mutex_unlock(&(listener->lock));
out:
	mutex_unlock(&cor_bindnodes);

	return rc;
}
/**
 * rc == 2 ==> addrtype not found
 * rc == 3 ==> addr not found
 * rc == 4 ==> connid allocation failed
 * rc == 5 ==> control msg alloc failed
 */
int connect_neigh(struct conn *rconn,
		__u16 addrtypelen, __u8 *addrtype,
		__u16 addrlen, __u8 *addr)
{
	int rc = 0;
	struct control_msg_out *cm;
	struct neighbor *nb = find_neigh(addrtypelen, addrtype, addrlen, addr);

	if (nb == 0)
		return 3;

	if (conn_init_out(rconn, nb)) {
		rc = 4;
		goto out;
	}

	cm = alloc_control_msg();
	if (unlikely(cm == 0)) {
		rc = 5;
		goto out;
	}

	send_connect_nb(cm, nb, rconn->reversedir->source.in.conn_id);

out:
	ref_counter_decr(&(nb->refs));

	return rc;
}
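/*
 * conn->isreset states as used below and in free_conn()/close_port():
 *   0 ==> connection active
 *   1 ==> reset, but no reset message is sent to the peer
 *   2 ==> reset complete
 *   3 ==> the reverse direction has been released as well; both conns
 *         may be freed
 */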
static void _reset_conn(struct conn *conn)
{
	if (conn->isreset == 1)
		goto cleanup;

	if (conn->isreset == 2 || conn->isreset == 3)
		return;

	if (conn->targettype == TARGET_OUT && conn->target.out.conn_id != 0) {
		struct control_msg_out *cm = alloc_control_msg();
		if (cm != 0)
			send_reset_conn(cm, conn->target.out.nb,
					conn->target.out.conn_id);
	}

cleanup:
	if (conn->sourcetype == SOURCE_IN) {
		mutex_lock(&(conn->source.in.nb->conn_list_lock));
		list_del(&(conn->source.in.nb_list));
		mutex_unlock(&(conn->source.in.nb->conn_list_lock));

		ref_counter_decr(&(conn->refs));

		if (conn->source.in.conn_id != 0)
			htable_delete(&connid_table, conn->source.in.conn_id,
					&(conn->source.in.conn_id));
	}

	if (conn->targettype == TARGET_OUT) {
		mutex_lock(&(conn->target.out.nb->conn_list_lock));
		list_del(&(conn->target.out.nb_list));
		mutex_unlock(&(conn->target.out.nb->conn_list_lock));
		ref_counter_decr(&(conn->refs));
	}

	conn->isreset = 2;
}
/* warning: do not hold the rcv_lock while calling this! */
void reset_conn(struct conn *conn)
{
	_reset_conn(conn);
	_reset_conn(conn->reversedir);
}
static int matches_connid_in(void *htentry, void *searcheditem)
{
	struct conn *conn = (struct conn *) htentry;
	__u32 conn_id = *((__u32 *) searcheditem);
	BUG_ON(conn->sourcetype != SOURCE_IN);
	return (conn->source.in.conn_id == conn_id);
}
static int __init cor_common_init(void)
{
	int rc;

	struct conn c;

	printk(KERN_ERR "sizeof conn: %d", (int) sizeof(c));
	printk(KERN_ERR " conn.source: %d", (int) sizeof(c.source));
	printk(KERN_ERR " conn.target: %d", (int) sizeof(c.target));
	printk(KERN_ERR " conn.target.out: %d", (int) sizeof(c.target.out));
	printk(KERN_ERR " conn.buf: %d", (int) sizeof(c.buf));

	printk(KERN_ERR " mutex: %d", (int) sizeof(struct mutex));
	printk(KERN_ERR " spinlock: %d", (int) sizeof(spinlock_t));
	printk(KERN_ERR " kref: %d", (int) sizeof(struct kref));

	conn_slab = kmem_cache_create("cor_conn", sizeof(struct conn), 8, 0, 0);
	htable_init(&connid_table, matches_connid_in,
			offsetof(struct conn, source.in.htab_entry),
			offsetof(struct conn, refs));

	bindnode_slab = kmem_cache_create("cor_bindnode",
			sizeof(struct bindnode), 8, 0, 0);
	connlistener_slab = kmem_cache_create("cor_connlistener",
			sizeof(struct connlistener), 8, 0, 0);

	rc = cor_neighbor_init();
	if (unlikely(rc != 0))
		return rc;

	return 0;
}

module_init(cor_common_init);
MODULE_LICENSE("GPL");