/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
21 #include <linux/mutex.h>
25 DEFINE_MUTEX(cor_bindnodes
);
26 DEFINE_MUTEX(conn_free
);
28 DEFINE_MUTEX(connid_gen
);
37 struct kmem_cache
*conn_slab
;
39 struct htable connid_table
;
41 struct kmem_cache
*bindnode_slab
;
42 struct kmem_cache
*connlistener_slab
;
44 static inline int hdr_size(void)
46 return ((sizeof(struct cell_hdr
) + sizeof(void *) - 1) / sizeof(void *)
/* Number of pointer-sized element slots that fit in one cell after the header. */
static inline int elements_per_cell(int cell_size)
{
	return (cell_size - hdr_size())/sizeof(void *);
}
55 static inline struct cell_hdr
*cell_addr(struct htable
*ht
, __u32 id
)
57 int idx
= (id
%ht
->htable_size
) / (elements_per_cell(ht
->cell_size
));
58 return (struct cell_hdr
*) (((char *)ht
->htable
) + ht
->cell_size
* idx
);
61 static inline char **element_addr(struct htable
*ht
, __u32 id
)
63 int idx
= (id
%ht
->htable_size
) % (elements_per_cell(ht
->cell_size
));
65 ( ((char *)cell_addr(ht
, id
)) +
66 hdr_size() + idx
*sizeof(void *) );
70 static inline char **next_element(struct htable
*ht
, char *element
)
72 return (char **)(element
+ ht
->entry_offset
);
75 static inline struct kref
*element_kref(struct htable
*ht
, char *element
)
77 return (struct kref
*)(element
+ ht
->kref_offset
);
81 static inline void unlock_element(struct htable
*ht
, __u32 key
)
83 struct cell_hdr
*hdr
= cell_addr(ht
, key
);
84 spin_unlock( &(hdr
->lock
) );
88 static char **get_element_nounlock(struct htable
*ht
, __u32 key
,
91 struct cell_hdr
*hdr
= cell_addr(ht
, key
);
92 char **element
= element_addr(ht
, key
);
96 spin_lock( &(hdr
->lock
) );
101 if (searcheditem
!= 0 && ht
->matches(*element
, searcheditem
))
103 element
= next_element(ht
, *element
);
109 char *htable_get(struct htable
*ht
, __u32 key
, void *searcheditem
)
111 unsigned long iflags
;
117 local_irq_save(iflags
);
118 element
= *(get_element_nounlock(ht
, key
, searcheditem
));
120 kref_get(element_kref(ht
, element
));
121 unlock_element(ht
, key
);
122 local_irq_restore(iflags
);
127 int htable_delete(struct htable
*ht
, __u32 key
,
128 void *searcheditem
, void (*free
) (struct kref
*ref
))
130 unsigned long iflags
;
138 local_irq_save(iflags
);
140 element
= get_element_nounlock(ht
, key
, searcheditem
);
141 BUG_ON(0 == element
);
144 /* key not in table */
149 next
= next_element(ht
, *element
);
150 kref_put(element_kref(ht
, *element
), free
);
154 unlock_element(ht
, key
);
155 local_irq_restore(iflags
);
160 void htable_insert(struct htable
*ht
, char *newelement
, __u32 key
)
162 unsigned long iflags
;
168 BUG_ON(*next_element(ht
, newelement
) != 0);
169 local_irq_save(iflags
);
171 element
= get_element_nounlock(ht
, key
, 0);
173 BUG_ON(element
== 0);
174 BUG_ON(*element
!= 0);
176 *element
= newelement
;
177 kref_get(element_kref(ht
, newelement
));
179 unlock_element(ht
, key
);
180 local_irq_restore(iflags
);
184 void htable_init(struct htable
*ht
, int (*matches
)(void *htentry
,
185 void *searcheditem
), __u32 entry_offset
, __u32 kref_offset
)
192 ht
->htable
= kmalloc(PAGE_SIZE
, GFP_KERNEL
);
193 if (ht
->htable
== 0) {
194 printk(KERN_CRIT
"cor: error allocating htable (out of "
198 memset(ht
->htable
, 0, PAGE_SIZE
);
201 num_cells
= PAGE_SIZE
/ht
->cell_size
;
203 for (j
=0;j
<num_cells
;j
++) {
204 struct cell_hdr
*hdr
= (struct cell_hdr
*)
205 ( ((char *) ht
->htable
) + j
* ht
->cell_size
);
206 spin_lock_init(&(hdr
->lock
));
209 ht
->htable_size
= num_cells
* elements_per_cell(ht
->cell_size
);
210 ht
->num_elements
= 0;
212 ht
->matches
= matches
;
213 ht
->entry_offset
= entry_offset
;
214 ht
->kref_offset
= kref_offset
;
217 struct conn
*get_conn(__u32 conn_id
)
219 return (struct conn
*) htable_get(&connid_table
, conn_id
, &conn_id
);
222 static int connid_alloc(struct conn
*sconn
)
227 BUG_ON(sconn
->sourcetype
!= SOURCE_IN
);
229 mutex_lock(&connid_gen
);
234 get_random_bytes((char *) &conn_id
, sizeof(conn_id
));
239 tmp
= get_conn(conn_id
);
241 kref_put(&(tmp
->ref
), free_conn
);
247 mutex_unlock(&connid_gen
);
252 sconn
->source
.in
.conn_id
= conn_id
;
253 htable_insert(&connid_table
, (char *) sconn
, conn_id
);
254 mutex_unlock(&connid_gen
);
258 void free_conn(struct kref
*ref
)
260 struct conn
*conn
= container_of(ref
, struct conn
, ref
);
262 BUG_ON(conn
->isreset
== 0);
264 mutex_lock(&conn_free
);
266 if (conn
->isreset
!= 3) {
267 conn
->reversedir
->isreset
= 3;
271 if (conn
->reversedir
!= 0) {
272 conn
->reversedir
->reversedir
= 0;
273 free_conn(&(conn
->reversedir
->ref
));
274 conn
->reversedir
= 0;
277 if (conn
->sourcetype
== SOURCE_IN
) {
278 kref_put(&(conn
->source
.in
.nb
->ref
), neighbor_free
);
279 conn
->source
.in
.nb
= 0;
282 if (conn
->targettype
== TARGET_OUT
) {
283 kref_put(&(conn
->target
.out
.nb
->ref
), neighbor_free
);
284 conn
->target
.out
.nb
= 0;
287 databuf_free(&(conn
->buf
));
289 kmem_cache_free(conn_slab
, conn
);
292 mutex_unlock(&conn_free
);
296 * rconn ==> the connection we received the commend from
297 * ==> init rconn->target.out + rconn->reversedir->source.in
299 * rc == 1 ==> connid allocation failed
301 int conn_init_out(struct conn
*rconn
, struct neighbor
*nb
)
303 struct conn
*sconn
= rconn
->reversedir
;
305 __u32 stall_timeout_ms
= rconn
->target
.unconnected
.stall_timeout_ms
;
307 BUG_ON(rconn
->targettype
!= TARGET_UNCONNECTED
);
309 BUG_ON(sconn
->sourcetype
!= SOURCE_NONE
);
311 memset(&(rconn
->target
.out
), 0, sizeof(rconn
->target
.out
));
312 memset(&(sconn
->source
.in
), 0, sizeof(sconn
->source
.in
));
314 rconn
->targettype
= TARGET_OUT
;
315 sconn
->sourcetype
= SOURCE_IN
;
317 rconn
->target
.out
.nb
= nb
;
318 sconn
->source
.in
.nb
= nb
;
320 rconn
->target
.out
.stall_timeout_ms
= stall_timeout_ms
;
321 skb_queue_head_init(&(sconn
->source
.in
.reorder_queue
));
324 * connid_alloc has to be called last, because packets may be received
325 * immediately after its execution
327 if (connid_alloc(sconn
))
330 mutex_lock(&(nb
->conn_list_lock
));
331 list_add_tail(&(sconn
->source
.in
.nb_list
), &(nb
->rcv_conn_list
));
332 list_add_tail(&(rconn
->target
.out
.nb_list
), &(nb
->snd_conn_list
));
333 mutex_unlock(&(nb
->conn_list_lock
));
336 kref_get(&(rconn
->ref
));
337 kref_get(&(sconn
->ref
));
342 void conn_init_sock_source(struct conn
*conn
)
345 conn
->sourcetype
= SOURCE_SOCK
;
346 memset(&(conn
->source
.sock
), 0, sizeof(conn
->source
.sock
));
347 init_waitqueue_head(&(conn
->source
.sock
.wait
));
350 void conn_init_sock_target(struct conn
*conn
)
353 conn
->targettype
= TARGET_SOCK
;
354 memset(&(conn
->target
.sock
), 0, sizeof(conn
->target
.sock
));
355 init_waitqueue_head(&(conn
->target
.sock
.wait
));
358 struct conn
* alloc_conn(gfp_t allocflags
)
360 struct conn
*rconn
= 0;
361 struct conn
*sconn
= 0;
363 rconn
= kmem_cache_alloc(conn_slab
, allocflags
);
364 if (unlikely(0 == rconn
))
367 sconn
= kmem_cache_alloc(conn_slab
, allocflags
);
368 if (unlikely(0 == sconn
))
371 memset(rconn
, 0, sizeof(struct conn
));
372 memset(sconn
, 0, sizeof(struct conn
));
374 rconn
->reversedir
= sconn
;
375 sconn
->reversedir
= rconn
;
377 kref_init(&(rconn
->ref
));
378 kref_init(&(sconn
->ref
));
380 mutex_init(&(rconn
->rcv_lock
));
381 mutex_init(&(sconn
->rcv_lock
));
383 rconn
->sockstate
= SOCKSTATE_CONN
;
384 sconn
->sockstate
= SOCKSTATE_CONN
;
386 databuf_init(&(rconn
->buf
));
387 databuf_init(&(sconn
->buf
));
389 rconn
->sourcetype
= SOURCE_NONE
;
390 sconn
->sourcetype
= SOURCE_NONE
;
391 rconn
->targettype
= TARGET_UNCONNECTED
;
392 sconn
->targettype
= TARGET_UNCONNECTED
;
394 rconn
->target
.unconnected
.stall_timeout_ms
=
395 CONN_STALL_DEFAULT_TIMEOUT_MS
;
396 sconn
->target
.unconnected
.stall_timeout_ms
=
397 CONN_STALL_DEFAULT_TIMEOUT_MS
;
402 kmem_cache_free(conn_slab
, rconn
);
407 static struct connlistener
*get_connlistener(__be64 port
)
409 struct list_head
*curr
= openports
.next
;
411 while (curr
!= &openports
) {
412 struct bindnode
*currnode
= ((struct bindnode
*)
413 (((char *)curr
) - offsetof(struct bindnode
, lh
)));
414 if (currnode
->port
== port
) {
415 BUG_ON(currnode
->owner
== 0);
416 return currnode
->owner
;
425 void close_port(struct connlistener
*listener
)
427 mutex_lock(&cor_bindnodes
);
429 if (listener
->bn
!= 0) {
430 list_del(&(listener
->bn
->lh
));
431 kmem_cache_free(bindnode_slab
, listener
->bn
);
435 while (list_empty(&(listener
->conn_queue
)) == 0) {
436 struct conn
*rconn
= container_of(listener
->conn_queue
.next
,
437 struct conn
, source
.sock
.cl_list
);
438 rconn
->reversedir
->isreset
= 1;
440 list_del(&(rconn
->source
.sock
.cl_list
));
441 kref_put(&(rconn
->ref
), free_conn
);
444 kmem_cache_free(connlistener_slab
, listener
);
446 mutex_unlock(&cor_bindnodes
);
449 struct connlistener
*open_port(__be64 port
)
452 struct bindnode
*bn
= 0;
453 struct connlistener
*listener
= 0;
455 mutex_lock(&cor_bindnodes
);
456 if (get_connlistener(port
) != 0)
460 bn
= kmem_cache_alloc(bindnode_slab
, GFP_KERNEL
);
461 listener
= kmem_cache_alloc(connlistener_slab
, GFP_KERNEL
);
463 memset(bn
, 0, sizeof(struct bindnode
));
464 memset(listener
, 0, sizeof(struct connlistener
));
466 bn
->owner
= listener
;
469 /* kref is not actually used */
470 listener
->sockstate
= SOCKSTATE_LISTENER
;
472 mutex_init(&(listener
->lock
));
473 INIT_LIST_HEAD(&(listener
->conn_queue
));
474 init_waitqueue_head(&(listener
->wait
));
476 list_add_tail((struct list_head
*) &(bn
->lh
), &openports
);
479 mutex_unlock(&cor_bindnodes
);
486 * rc == 2 port not open
487 * rc == 3 listener queue full
489 int connect_port(struct conn
*rconn
, __be64 port
)
492 struct connlistener
*listener
;
495 mutex_lock(&cor_bindnodes
);
497 listener
= get_connlistener(port
);
503 mutex_lock(&(listener
->lock
));
505 if (listener
->queue_len
>= listener
->queue_maxlen
) {
506 if (listener
->queue_maxlen
<= 0)
514 kref_get(&(rconn
->reversedir
->ref
));
516 conn_init_sock_target(rconn
);
517 conn_init_sock_source(rconn
->reversedir
);
519 list_add_tail(&(rconn
->reversedir
->source
.sock
.cl_list
),
520 &(listener
->conn_queue
));
521 listener
->queue_len
++;
522 wake_up_interruptible(&(listener
->wait
));
525 mutex_unlock(&(listener
->lock
));
528 mutex_unlock(&cor_bindnodes
);
534 * rc == 2 addrtype not found
535 * rc == 3 addr not found
536 * rc == 4 ==> connid allocation failed
537 * rc == 5 ==> control msg alloc failed
539 int connect_neigh(struct conn
*rconn
,
540 __u16 addrtypelen
, __u8
*addrtype
,
541 __u16 addrlen
, __u8
*addr
)
544 struct control_msg_out
*cm
;
545 struct neighbor
*nb
= find_neigh(addrtypelen
, addrtype
, addrlen
, addr
);
548 if (conn_init_out(rconn
, nb
)) {
553 cm
= alloc_control_msg();
554 if (unlikely(cm
== 0)) {
559 send_connect_nb(cm
, nb
, rconn
->reversedir
->source
.in
.conn_id
);
563 kref_put(&(nb
->ref
), neighbor_free
);
569 static void _reset_conn(struct conn
*conn
)
571 if (conn
->isreset
== 1)
574 if (conn
->isreset
== 2 || conn
->isreset
== 3)
577 if (conn
->targettype
== TARGET_OUT
&& conn
->target
.out
.conn_id
!= 0) {
578 struct control_msg_out
*cm
= alloc_control_msg();
580 send_reset_conn(cm
, conn
->target
.out
.nb
,
581 conn
->target
.out
.conn_id
);
586 if (conn
->sourcetype
== SOURCE_IN
) {
587 mutex_lock(&(conn
->source
.in
.nb
->conn_list_lock
));
588 list_del(&(conn
->source
.in
.nb_list
));
589 mutex_unlock(&(conn
->source
.in
.nb
->conn_list_lock
));
591 kref_put(&(conn
->ref
), free_conn
);
593 if (conn
->source
.in
.conn_id
!= 0)
594 htable_delete(&connid_table
, conn
->source
.in
.conn_id
,
595 &(conn
->source
.in
.conn_id
), free_conn
);
598 if (conn
->targettype
== TARGET_OUT
) {
599 mutex_lock(&(conn
->target
.out
.nb
->conn_list_lock
));
600 list_del(&(conn
->target
.out
.nb_list
));
601 mutex_unlock(&(conn
->target
.out
.nb
->conn_list_lock
));
602 kref_put(&(conn
->ref
), free_conn
);
608 /* warning: do not hold the rcv_lock while calling this! */
609 void reset_conn(struct conn
*conn
)
611 printk(KERN_ERR
"reset_conn");
613 _reset_conn(conn
->reversedir
);
616 static int matches_connid_in(void *htentry
, void *searcheditem
)
618 struct conn
*conn
= (struct conn
*) htentry
;
619 __u32 conn_id
= *((__u32
*) searcheditem
);
620 BUG_ON(conn
->sourcetype
!= SOURCE_IN
);
621 return (conn
->source
.in
.conn_id
== conn_id
);
624 static int __init
cor_common_init(void)
630 printk(KERN_ERR
"sizeof conn: %d", sizeof(c
));
631 printk(KERN_ERR
" conn.source: %d", sizeof(c
.source
));
632 printk(KERN_ERR
" conn.target: %d", sizeof(c
.target
));
633 printk(KERN_ERR
" conn.target.out: %d", sizeof(c
.target
.out
));
634 printk(KERN_ERR
" conn.buf: %d", sizeof(c
.buf
));
636 printk(KERN_ERR
" mutex: %d", sizeof(struct mutex
));
637 printk(KERN_ERR
" spinlock: %d", sizeof(spinlock_t
));
638 printk(KERN_ERR
" kref: %d", sizeof(struct kref
));
641 conn_slab
= kmem_cache_create("cor_conn", sizeof(struct conn
), 8, 0, 0);
642 htable_init(&connid_table
, matches_connid_in
,
643 offsetof(struct conn
, source
.in
.htab_entry
),
644 offsetof(struct conn
, ref
));
646 bindnode_slab
= kmem_cache_create("cor_bindnode",
647 sizeof(struct bindnode
), 8, 0, 0);
648 connlistener_slab
= kmem_cache_create("cor_connlistener",
649 sizeof(struct connlistener
), 8, 0, 0);
657 rc
= cor_neighbor_init();
/* register cor_common_init as the module entry point */
module_init(cor_common_init);
MODULE_LICENSE("GPL");